mirror of
https://github.com/ansible-collections/community.docker.git
synced 2026-03-15 11:53:31 +00:00
Initial commit
This commit is contained in:
commit
a1f472244d
356
plugins/connection/docker.py
Normal file
356
plugins/connection/docker.py
Normal file
@ -0,0 +1,356 @@
|
||||
# Based on the chroot connection plugin by Maykel Moya
|
||||
#
|
||||
# (c) 2014, Lorin Hochstein
|
||||
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
|
||||
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
# Plugin documentation rendered by ansible-doc and used by the config system
# for option validation.
# Fix: author was misspelled "Lorin Hochestein"; the copyright header above
# spells it "Lorin Hochstein".
DOCUMENTATION = '''
    author:
        - Lorin Hochstein
        - Leendert Brouwer
    connection: docker
    short_description: Run tasks in docker containers
    description:
        - Run commands or put/fetch files to an existing docker container.
    options:
      remote_user:
        description:
            - The user to execute as inside the container
        vars:
            - name: ansible_user
            - name: ansible_docker_user
      docker_extra_args:
        description:
            - Extra arguments to pass to the docker command line
        default: ''
      remote_addr:
        description:
            - The name of the container you want to access.
        default: inventory_hostname
        vars:
            - name: ansible_host
            - name: ansible_docker_host
'''
|
||||
|
||||
import distutils.spawn
|
||||
import fcntl
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
import re
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
import ansible.constants as C
|
||||
from ansible.compat import selectors
|
||||
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils._text import to_bytes, to_native, to_text
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.display import Display
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
    ''' Local docker based connections '''

    transport = 'community.docker.docker'
    has_pipelining = True

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Note: docker supports running as non-root in some configurations.
        # (For instance, setting the UNIX socket file to be readable and
        # writable by a specific UNIX group and then putting users into that
        # group).  Therefore we don't check that the user is root when using
        # this connection.  But if the user is getting a permission denied
        # error it probably means that docker on their system is only
        # configured to be connected to by root and they are not running as
        # root.

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = ('.ps1', '.exe', '')

        if 'docker_command' in kwargs:
            self.docker_cmd = kwargs['docker_command']
        else:
            self.docker_cmd = distutils.spawn.find_executable('docker')
            if not self.docker_cmd:
                raise AnsibleError("docker command not found in PATH")

        docker_version = self._get_docker_version()
        if docker_version == u'dev':
            display.warning(u'Docker version number is "dev". Will assume latest version.')
        if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
            raise AnsibleError('docker connection type requires docker 1.3 or higher')

        # The remote user we will request from docker (if supported)
        self.remote_user = None
        # The actual user which will execute commands in docker (if known)
        self.actual_user = None

        if self._play_context.remote_user is not None:
            if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
                # Support for specifying the exec user was added in docker 1.7
                self.remote_user = self._play_context.remote_user
                self.actual_user = self.remote_user
            else:
                self.actual_user = self._get_docker_remote_user()

                if self.actual_user != self._play_context.remote_user:
                    display.warning(u'docker {0} does not support remote_user, using container default: {1}'
                                    .format(docker_version, self.actual_user or u'?'))
        elif self._display.verbosity > 2:
            # Since we're not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we don't need to
            self.actual_user = self._get_docker_remote_user()

    @staticmethod
    def _sanitize_version(version):
        """ Strip everything except digits, letters and dots from a version string. """
        return re.sub(u'[^0-9a-zA-Z.]', u'', version)

    def _old_docker_version(self):
        """ Run the plain `docker version` subcommand (pre-1.x output format).

        Returns (command, native stdout, raw stderr, returncode).
        """
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        old_version_subcommand = ['version']

        old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
        p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()

        return old_docker_cmd, to_native(cmd_output), err, p.returncode

    def _new_docker_version(self):
        """ Run `docker version --format ...` (newer Docker releases).

        Returns (command, native stdout, raw stderr, returncode).
        """
        # no result yet, must be newer Docker version
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]

        new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
        p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()
        return new_docker_cmd, to_native(cmd_output), err, p.returncode

    def _get_docker_version(self):
        """ Determine the Docker server version, trying the old output format
        first and falling back to the --format based query. """

        cmd, cmd_output, err, returncode = self._old_docker_version()
        if returncode == 0:
            for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
                if line.startswith(u'Server version:'):  # old docker versions
                    return self._sanitize_version(line.split()[2])

        cmd, cmd_output, err, returncode = self._new_docker_version()
        if returncode:
            raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))

        return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))

    def _get_docker_remote_user(self):
        """ Get the default user configured in the docker container """
        p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        out, err = p.communicate()
        out = to_text(out, errors='surrogate_or_strict')

        if p.returncode != 0:
            display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
            return None

        # The default exec user is root, unless it was changed in the Dockerfile with USER
        return out.strip() or u'root'

    def _build_exec_cmd(self, cmd):
        """ Build the local docker exec command to run cmd on remote_host

            If remote_user is available and is supported by the docker
            version we are using, it will be provided to docker exec.
        """

        local_cmd = [self.docker_cmd]

        if self._play_context.docker_extra_args:
            local_cmd += self._play_context.docker_extra_args.split(' ')

        local_cmd += [b'exec']

        if self.remote_user is not None:
            local_cmd += [b'-u', self.remote_user]

        # -i is needed to keep stdin open which allows pipelining to work
        local_cmd += [b'-i', self._play_context.remote_addr] + cmd

        return local_cmd

    def _connect(self, port=None):
        """ Connect to the container. Nothing to do """
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
                self.actual_user or u'?'), host=self._play_context.remote_addr
            )
            self._connected = True

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ Run a command on the docker host """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])

        display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        p = subprocess.Popen(
            local_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        display.debug("done running command with Popen()")

        if self.become and self.become.expect_prompt() and sudoable:
            # Switch both pipes to non-blocking mode so the selector loop can
            # poll for the privilege escalation password prompt.
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            selector = selectors.DefaultSelector()
            selector.register(p.stdout, selectors.EVENT_READ)
            selector.register(p.stderr, selectors.EVENT_READ)

            become_output = b''
            try:
                while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
                    events = selector.select(self._play_context.timeout)
                    if not events:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))

                    for key, event in events:
                        # BUGFIX: initialize chunk so an event on an unexpected
                        # file object cannot leave it unbound and raise
                        # NameError at the `if not chunk` check below.
                        chunk = b''
                        if key.fileobj == p.stdout:
                            chunk = p.stdout.read()
                        elif key.fileobj == p.stderr:
                            chunk = p.stderr.read()

                        if not chunk:
                            stdout, stderr = p.communicate()
                            raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                        become_output += chunk
            finally:
                selector.close()

            if not self.become.check_success(become_output):
                become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
            # Restore blocking mode before handing the pipes to communicate().
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate(in_data)
        display.debug("done communicating")

        display.debug("done with docker.exec_command()")
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given chroot.  So for now we're choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it's a problem
        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound(
                "file or module does not exist: %s" % to_native(in_path))

        out_path = shlex_quote(out_path)
        # Older docker doesn't have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
            if not os.fstat(in_file.fileno()).st_size:
                count = ' count=0'
            else:
                count = ''
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            try:
                p = subprocess.Popen(args, stdin=in_file,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError("docker connection requires dd command in the container to put files")
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
                                   (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))

    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        in_path = self._prefix_login_path(in_path)
        # out_path is the final file path, but docker takes a directory, not a
        # file path
        out_dir = os.path.dirname(out_path)

        args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]

        p = subprocess.Popen(args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()

        # `docker cp` writes the file under its original basename in out_dir.
        actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

        if p.returncode != 0:
            # Older docker doesn't have native support for fetching files command `cp`
            # If `cp` fails, try to use `dd` instead
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
                try:
                    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                                         stdout=out_file, stderr=subprocess.PIPE)
                except OSError:
                    raise AnsibleError("docker connection requires dd command in the container to put files")
                stdout, stderr = p.communicate()

                if p.returncode != 0:
                    raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

        # Rename if needed
        if actual_out_path != out_path:
            os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))

    def close(self):
        """ Terminate the connection. Nothing to do for Docker"""
        super(Connection, self).close()
        self._connected = False
|
||||
136
plugins/doc_fragments/docker.py
Normal file
136
plugins/doc_fragments/docker.py
Normal file
@ -0,0 +1,136 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
    """Reusable documentation fragments shared by the Docker modules.

    Each attribute is a YAML document (as a raw string) that ansible-doc
    merges into a module's own DOCUMENTATION via
    extends_documentation_fragment.
    """

    # Docker doc fragment
    DOCUMENTATION = r'''

options:
    docker_host:
        description:
            - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
              the module will automatically replace C(tcp) in the connection URL with C(https).
            - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: str
        default: unix://var/run/docker.sock
        aliases: [ docker_url ]
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
              be used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: localhost
    api_version:
        description:
            - The version of the Docker API running on the Docker Host.
            - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: auto
        aliases: [ docker_api_version ]
    timeout:
        description:
            - The maximum amount of time in seconds to wait on a response from the API.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: int
        default: 60
    ca_cert:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_ca_cert, cacert_path ]
    client_cert:
        description:
            - Path to the client's TLS certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_cert, cert_path ]
    client_key:
        description:
            - Path to the client's TLS key file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_key, key_path ]
    ssl_version:
        description:
            - Provide a valid SSL version number. Default value determined by ssl.py module.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
              used instead.
        type: str
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
    validate_certs:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
        aliases: [ tls_verify ]
    debug:
        description:
            - Debug mode
        type: bool
        default: no

notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
      C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      U(https://docs.docker.com/machine/reference/env/) for more details.
    - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
      For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(pip).
    - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
      In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
      and use C($DOCKER_CONFIG/config.json) otherwise.
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0

    DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
requirements:
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
      For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
      install the C(docker) Python module. Note that both modules should *not*
      be installed at the same time. Also note that when both modules are installed
      and one of them is uninstalled, the other might no longer function and a
      reinstall of it is required."
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
    # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.

    DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
requirements:
    - "Python >= 2.7"
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
      This module does *not* work with docker-py."
'''
|
||||
256
plugins/inventory/docker_machine.py
Normal file
256
plugins/inventory/docker_machine.py
Normal file
@ -0,0 +1,256 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
# Inventory plugin documentation: rendered by ansible-doc and used by the
# config system to validate the plugin's options (daemon_env,
# running_required, verbose_output).
DOCUMENTATION = '''
    name: docker_machine
    plugin_type: inventory
    author: Ximon Eighteen (@ximon18)
    short_description: Docker Machine inventory source
    requirements:
        - L(Docker Machine,https://docs.docker.com/machine/)
    extends_documentation_fragment:
        - constructed
    description:
        - Get inventory hosts from Docker Machine.
        - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
        - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
        - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.

    options:
        plugin:
            description: token that ensures this is a source file for the C(docker_machine) plugin.
            required: yes
            choices: ['docker_machine']
        daemon_env:
            description:
                - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
                - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
                  A warning will be issued for any skipped host if the choice is C(require).
                - With C(optional) and C(optional-silently), fetch them and not skip hosts for which they cannot be fetched.
                  A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
                - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
                - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
            type: str
            choices:
                - require
                - require-silently
                - optional
                - optional-silently
                - skip
            default: require
        running_required:
            description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
            type: bool
            default: yes
        verbose_output:
            description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
            type: bool
            default: yes
'''

# Usage examples shown by ansible-doc.
EXAMPLES = '''
# Minimal example
plugin: docker_machine

# Example using constructed features to create a group per Docker Machine driver
# (https://docs.docker.com/machine/drivers/), e.g.:
#   $ docker-machine create --driver digitalocean ... mymachine
#   $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
#   {
#     ...
#     "digitalocean": {
#       "hosts": [
#           "mymachine"
#       ]
#     ...
#   }
strict: no
keyed_groups:
    - separator: ''
      key: docker_machine_node_attributes.DriverName

# Example grouping hosts by Digital Machine tag
strict: no
keyed_groups:
    - prefix: tag
      key: 'dm_tags'

# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
compose:
    ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
'''
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.utils.display import Display
|
||||
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using Docker machine as source. '''
|
||||
|
||||
NAME = 'community.docker.docker_machine'
|
||||
|
||||
DOCKER_MACHINE_PATH = None
|
||||
|
||||
def _run_command(self, args):
|
||||
if not self.DOCKER_MACHINE_PATH:
|
||||
try:
|
||||
self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
|
||||
except ValueError as e:
|
||||
raise AnsibleError(to_native(e))
|
||||
|
||||
command = [self.DOCKER_MACHINE_PATH]
|
||||
command.extend(args)
|
||||
display.debug('Executing command {0}'.format(command))
|
||||
try:
|
||||
result = subprocess.check_output(command)
|
||||
except subprocess.CalledProcessError as e:
|
||||
display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
|
||||
raise e
|
||||
|
||||
return to_text(result).strip()
|
||||
|
||||
def _get_docker_daemon_variables(self, machine_name):
|
||||
'''
|
||||
Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
|
||||
the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
|
||||
'''
|
||||
try:
|
||||
env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
|
||||
except subprocess.CalledProcessError:
|
||||
# This can happen when the machine is created but provisioning is incomplete
|
||||
return []
|
||||
|
||||
# example output of docker-machine env --shell=sh:
|
||||
# export DOCKER_TLS_VERIFY="1"
|
||||
# export DOCKER_HOST="tcp://134.209.204.160:2376"
|
||||
# export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
|
||||
# export DOCKER_MACHINE_NAME="routinator"
|
||||
# # Run this command to configure your shell:
|
||||
# # eval $(docker-machine env --shell=bash routinator)
|
||||
|
||||
# capture any of the DOCKER_xxx variables that were output and create Ansible host vars
|
||||
# with the same name and value but with a dm_ name prefix.
|
||||
vars = []
|
||||
for line in env_lines:
|
||||
match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
|
||||
if match:
|
||||
env_var_name = match.group(1)
|
||||
env_var_value = match.group(2)
|
||||
vars.append((env_var_name, env_var_value))
|
||||
|
||||
return vars
|
||||
|
||||
def _get_machine_names(self):
|
||||
# Filter out machines that are not in the Running state as we probably can't do anything useful actions
|
||||
# with them.
|
||||
ls_command = ['ls', '-q']
|
||||
if self.get_option('running_required'):
|
||||
ls_command.extend(['--filter', 'state=Running'])
|
||||
|
||||
try:
|
||||
ls_lines = self._run_command(ls_command)
|
||||
except subprocess.CalledProcessError:
|
||||
return []
|
||||
|
||||
return ls_lines.splitlines()
|
||||
|
||||
def _inspect_docker_machine_host(self, node):
|
||||
try:
|
||||
inspect_lines = self._run_command(['inspect', self.node])
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
|
||||
return json.loads(inspect_lines)
|
||||
|
||||
def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
|
||||
if not env_var_tuples:
|
||||
warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
|
||||
if daemon_env in ('require', 'require-silently'):
|
||||
if daemon_env == 'require':
|
||||
display.warning('{0}: host will be skipped'.format(warning_prefix))
|
||||
return True
|
||||
else: # 'optional', 'optional-silently'
|
||||
if daemon_env == 'optional':
|
||||
display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
|
||||
return False
|
||||
|
||||
def _populate(self):
    """Build the inventory from `docker-machine` output.

    For every (optionally running-only) machine: inspect it, optionally
    capture its Docker daemon env vars, then register the host with
    standard Ansible connection variables plus dm_-prefixed extras, and
    finally apply the 'constructed' features (compose/groups/keyed_groups).
    """
    daemon_env = self.get_option('daemon_env')
    try:
        # NOTE: the loop variable is deliberately an instance attribute
        # (self.node); _inspect_docker_machine_host reads it as well.
        for self.node in self._get_machine_names():
            self.node_attrs = self._inspect_docker_machine_host(self.node)
            if not self.node_attrs:
                # Inspect failed for this machine; silently move on.
                continue

            machine_name = self.node_attrs['Driver']['MachineName']

            # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
            # that could be used to set environment variables to influence a local Docker client:
            if daemon_env == 'skip':
                env_var_tuples = []
            else:
                env_var_tuples = self._get_docker_daemon_variables(machine_name)
                if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                    continue

            # add an entry in the inventory for this host
            self.inventory.add_host(machine_name)

            # set standard Ansible remote host connection settings to details captured from `docker-machine`
            # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
            self.inventory.set_variable(machine_name, 'ansible_host', self.node_attrs['Driver']['IPAddress'])
            self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
            self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
            self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])

            # set variables based on Docker Machine tags
            # ('Tags' may be absent or None depending on the driver)
            tags = self.node_attrs['Driver'].get('Tags') or ''
            self.inventory.set_variable(machine_name, 'dm_tags', tags)

            # set variables based on Docker Machine env variables
            for kv in env_var_tuples:
                self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])

            if self.get_option('verbose_output'):
                self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)

            # Use constructed if applicable
            strict = self.get_option('strict')

            # Composed variables
            self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)

            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)

            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)

    except Exception as e:
        raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
                           to_native(e), orig_exc=e)
|
||||
|
||||
def verify_file(self, path):
    """Return the possibility of a file being consumable by this plugin."""
    if not super(InventoryModule, self).verify_file(path):
        return False
    # Only accept YAML config files named after this plugin.
    valid_suffixes = (self.NAME + '.yaml', self.NAME + '.yml')
    return path.endswith(valid_suffixes)
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
    # Inventory-plugin entry point, invoked by Ansible's inventory manager.
    # The base class wires up self.inventory / self.loader, then the plugin's
    # YAML configuration is read and the docker-machine hosts are enumerated
    # into the inventory.
    super(InventoryModule, self).parse(inventory, loader, path, cache)
    self._read_config_data(path)
    self._populate()
||||
255
plugins/inventory/docker_swarm.py
Normal file
255
plugins/inventory/docker_swarm.py
Normal file
@ -0,0 +1,255 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
||||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: docker_swarm
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Reads inventories from the Docker swarm API.
|
||||
- Uses a YAML configuration file docker_swarm.[yml|yaml].
|
||||
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
|
||||
I(managers) - all manager nodes; I(leader) - the swarm leader node;
|
||||
I(nonleaders) - all nodes except the swarm leader."
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(docker_swarm) for this plugin to
recognize it as its own.
|
||||
type: str
|
||||
required: true
|
||||
choices: docker_swarm
|
||||
docker_host:
|
||||
description:
|
||||
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
||||
- "Use C(unix://var/run/docker.sock) to connect via local socket."
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ docker_url ]
|
||||
verbose_output:
|
||||
description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS),
|
||||
C(EngineVersion))
|
||||
type: bool
|
||||
default: yes
|
||||
tls:
|
||||
description: Connect using TLS without verifying the authenticity of the Docker host server.
|
||||
type: bool
|
||||
default: no
|
||||
validate_certs:
|
||||
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
|
||||
host server.
|
||||
type: bool
|
||||
default: no
|
||||
aliases: [ tls_verify ]
|
||||
client_key:
|
||||
description: Path to the client's TLS key file.
|
||||
type: path
|
||||
aliases: [ tls_client_key, key_path ]
|
||||
ca_cert:
|
||||
description: Use a CA certificate when performing server verification by providing the path to a CA
|
||||
certificate file.
|
||||
type: path
|
||||
aliases: [ tls_ca_cert, cacert_path ]
|
||||
client_cert:
|
||||
description: Path to the client's TLS certificate file.
|
||||
type: path
|
||||
aliases: [ tls_client_cert, cert_path ]
|
||||
tls_hostname:
|
||||
description: When verifying the authenticity of the Docker host server, provide the expected name of
|
||||
the server.
|
||||
type: str
|
||||
ssl_version:
|
||||
description: Provide a valid SSL version number. Default value determined by ssl.py module.
|
||||
type: str
|
||||
api_version:
|
||||
description:
|
||||
- The version of the Docker API running on the Docker Host.
|
||||
- Defaults to the latest version of the API supported by docker-py.
|
||||
type: str
|
||||
aliases: [ docker_api_version ]
|
||||
timeout:
|
||||
description:
|
||||
- The maximum amount of time in seconds to wait on a response from the API.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
|
||||
will be used instead. If the environment variable is not set, the default value will be used.
|
||||
type: int
|
||||
default: 60
|
||||
aliases: [ time_out ]
|
||||
include_host_uri:
|
||||
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
|
||||
swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
|
||||
modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
|
||||
The port always defaults to C(2376).
|
||||
type: bool
|
||||
default: no
|
||||
include_host_uri_port:
|
||||
description: Override the detected port number included in I(ansible_host_uri)
|
||||
type: int
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example using local docker
|
||||
plugin: docker_swarm
|
||||
docker_host: unix://var/run/docker.sock
|
||||
|
||||
# Minimal example using remote docker
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
# Example using remote docker with unverified TLS
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: yes
|
||||
|
||||
# Example using remote docker with verified TLS and client certificate verification
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: yes
|
||||
ca_cert: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: False
|
||||
keyed_groups:
|
||||
# add e.g. x86_64 hosts to an arch_x86_64 group
|
||||
- prefix: arch
|
||||
key: 'Description.Platform.Architecture'
|
||||
# add e.g. linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: 'Description.Platform.OS'
|
||||
# create a group per node label
|
||||
# e.g. a node labeled w/ "production" ends up in group "label_production"
|
||||
# hint: labels containing special characters will be converted to safe names
|
||||
- key: 'Spec.Labels'
|
||||
prefix: label
|
||||
'''
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import update_tls_hostname, get_connect_params
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.parsing.utils.addresses import parse_address
|
||||
|
||||
try:
|
||||
import docker
|
||||
HAS_DOCKER = True
|
||||
except ImportError:
|
||||
HAS_DOCKER = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
    ''' Host inventory parser for ansible using Docker swarm as source.

    Connects to a Swarm manager via the Docker SDK and registers every swarm
    node as a host, grouped by role and leader status.

    NOTE(review): the plugin documentation mentions I(managers)/I(workers),
    but the groups created here are 'manager'/'worker' — they mirror the raw
    Spec.Role values returned by the API; confirm which naming is intended.
    '''

    NAME = 'community.docker.docker_swarm'

    def _fail(self, msg):
        # Failure callback handed to get_connect_params(); raising keeps error
        # reporting consistent with how inventory plugins surface problems.
        raise AnsibleError(msg)

    def _populate(self):
        """Connect to the Swarm manager and fill the inventory with its nodes."""
        # Translate plugin options into docker-py connection parameters.
        raw_params = dict(
            docker_host=self.get_option('docker_host'),
            tls=self.get_option('tls'),
            tls_verify=self.get_option('validate_certs'),
            key_path=self.get_option('client_key'),
            cacert_path=self.get_option('ca_cert'),
            cert_path=self.get_option('client_cert'),
            tls_hostname=self.get_option('tls_hostname'),
            api_version=self.get_option('api_version'),
            timeout=self.get_option('timeout'),
            ssl_version=self.get_option('ssl_version'),
            debug=None,
        )
        update_tls_hostname(raw_params)
        connect_params = get_connect_params(raw_params, fail_function=self._fail)
        self.client = docker.DockerClient(**connect_params)
        self.inventory.add_group('all')
        self.inventory.add_group('manager')
        self.inventory.add_group('worker')
        self.inventory.add_group('leader')
        self.inventory.add_group('nonleaders')

        # host_uri_port is only defined when include_host_uri is set; every
        # later use is guarded by the same option, so it cannot be unbound.
        if self.get_option('include_host_uri'):
            if self.get_option('include_host_uri_port'):
                host_uri_port = str(self.get_option('include_host_uri_port'))
            elif self.get_option('tls') or self.get_option('validate_certs'):
                host_uri_port = '2376'
            else:
                host_uri_port = '2375'

        try:
            self.nodes = self.client.nodes.list()
            for self.node in self.nodes:
                # Re-fetch each node to get the full attribute set.
                self.node_attrs = self.client.nodes.get(self.node.id).attrs
                # Hosts are keyed by swarm node ID, not hostname.
                self.inventory.add_host(self.node_attrs['ID'])
                # Spec.Role is 'manager' or 'worker', matching the groups above.
                self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
                self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
                                            self.node_attrs['Status']['Addr'])
                if self.get_option('include_host_uri'):
                    self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
                                                'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
                if self.get_option('verbose_output'):
                    self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
                if 'ManagerStatus' in self.node_attrs:
                    if self.node_attrs['ManagerStatus'].get('Leader'):
                        # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                        # Check moby/moby#35437 for details
                        swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
                            self.node_attrs['Status']['Addr']
                        if self.get_option('include_host_uri'):
                            # Leader's URI overrides the one set above.
                            self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
                                                        'tcp://' + swarm_leader_ip + ':' + host_uri_port)
                        self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
                        self.inventory.add_host(self.node_attrs['ID'], group='leader')
                    else:
                        self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
                else:
                    # Workers have no ManagerStatus and can never be the leader.
                    self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
                # Use constructed if applicable
                strict = self.get_option('strict')
                # Composed variables
                self._set_composite_vars(self.get_option('compose'),
                                         self.node_attrs,
                                         self.node_attrs['ID'],
                                         strict=strict)
                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(self.get_option('groups'),
                                                  self.node_attrs,
                                                  self.node_attrs['ID'],
                                                  strict=strict)
                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
                                               self.node_attrs,
                                               self.node_attrs['ID'],
                                               strict=strict)
        except Exception as e:
            raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
                               to_native(e))

    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))

    def parse(self, inventory, loader, path, cache=True):
        # Inventory-plugin entry point; fail fast when the Docker SDK for
        # Python is missing, otherwise read the config and populate.
        if not HAS_DOCKER:
            raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
                               'https://github.com/docker/docker-py.')
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
|
||||
0
plugins/module_utils/__init__.py
Normal file
0
plugins/module_utils/__init__.py
Normal file
1022
plugins/module_utils/common.py
Normal file
1022
plugins/module_utils/common.py
Normal file
File diff suppressed because it is too large
Load Diff
280
plugins/module_utils/swarm.py
Normal file
280
plugins/module_utils/swarm.py
Normal file
@ -0,0 +1,280 @@
|
||||
# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
|
||||
# (c) Thierry Bouvet (@tbouvet)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import json
|
||||
from time import sleep
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
LooseVersion,
|
||||
)
|
||||
|
||||
|
||||
class AnsibleDockerSwarmClient(AnsibleDockerClient):
    # Swarm-oriented extension of AnsibleDockerClient: adds node/service
    # inspection and membership/role helpers shared by the docker_swarm_*
    # modules. All 'fail' calls delegate to the module's fail handler and do
    # not return.

    def __init__(self, **kwargs):
        super(AnsibleDockerSwarmClient, self).__init__(**kwargs)

    def get_swarm_node_id(self):
        """
        Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
        of Docker host the module is executed on
        :return:
            NodeID of host or 'None' if not part of Swarm
        """

        try:
            info = self.info()
        except APIError as exc:
            self.fail("Failed to get node information for %s" % to_native(exc))

        if info:
            # dumps/loads round-trip converts the SDK response into plain
            # JSON-compatible structures.
            json_str = json.dumps(info, ensure_ascii=False)
            swarm_info = json.loads(json_str)
            if swarm_info['Swarm']['NodeID']:
                return swarm_info['Swarm']['NodeID']
        return None

    def check_if_swarm_node(self, node_id=None):
        """
        Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host
        system information looking if specific key in output exists. If 'node_id' is provided then it tries to
        read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if
        it is not executed on Swarm manager

        :param node_id: Node identifier
        :return:
            bool: True if node is part of Swarm, False otherwise
        """

        if node_id is None:
            try:
                info = self.info()
            except APIError:
                self.fail("Failed to get host information.")

            if info:
                json_str = json.dumps(info, ensure_ascii=False)
                swarm_info = json.loads(json_str)
                if swarm_info['Swarm']['NodeID']:
                    return True
                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
                    return True
            return False
        else:
            try:
                node_info = self.get_node_inspect(node_id=node_id)
            except APIError:
                # NOTE(review): returns None (not False) on APIError, unlike
                # the docstring's True/False contract — confirm callers
                # treat None as falsy intentionally.
                return

            if node_info['ID'] is not None:
                return True
            return False

    def check_if_swarm_manager(self):
        """
        Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
        is performed. The inspect_swarm() will fail if node is not a manager

        :return: True if node is Swarm Manager, False otherwise
        """

        try:
            self.inspect_swarm()
            return True
        except APIError:
            return False

    def fail_task_if_not_swarm_manager(self):
        """
        If host is not a swarm manager then Ansible task on this host should end with 'failed' state
        """
        if not self.check_if_swarm_manager():
            self.fail("Error running docker swarm module: must run on swarm manager node")

    def check_if_swarm_worker(self):
        """
        Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
        is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()

        :return: True if node is Swarm Worker, False otherwise
        """

        # A worker is "in the swarm, but not a manager".
        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
        """
        Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
        node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
        host that is not part of Swarm it will fail the playbook

        :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            True if node is part of swarm but its state is down, False otherwise
        """

        if repeat_check < 1:
            repeat_check = 1

        if node_id is None:
            node_id = self.get_swarm_node_id()

        for retry in range(0, repeat_check):
            if retry > 0:
                # Back off between attempts; the node may still be converging.
                sleep(5)
            node_info = self.get_node_inspect(node_id=node_id)
            if node_info['Status']['State'] == 'down':
                return True
        return False

    def get_node_inspect(self, node_id=None, skip_missing=False):
        """
        Returns Swarm node info as in 'docker node inspect' command about single node

        :param skip_missing: if True then function will return None instead of failing the task
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            Single node information structure
        """

        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            # 503: not talking to a manager; 404: node unknown (optionally
            # tolerated via skip_missing); anything else is fatal.
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            if exc.status_code == 404:
                if skip_missing:
                    return None
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        # Normalize the SDK response into plain JSON-compatible structures.
        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if 'ManagerStatus' in node_info:
            if node_info['ManagerStatus'].get('Leader'):
                # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                # Check moby/moby#35437 for details
                count_colons = node_info['ManagerStatus']['Addr'].count(":")
                if count_colons == 1:
                    # 'host:port' form — keep the host part, falling back to
                    # the status address when it is empty.
                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
                else:
                    swarm_leader_ip = node_info['Status']['Addr']
                node_info['Status']['Addr'] = swarm_leader_ip
        return node_info

    def get_all_nodes_inspect(self):
        """
        Returns Swarm node info as in 'docker node inspect' command about all registered nodes

        :return:
            Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        # Normalize the SDK response into plain JSON-compatible structures.
        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_list(self, output='short'):
        """
        Returns list of nodes registered in Swarm

        :param output: Defines format of returned data
        :return:
            If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
            if 'output' is 'long' then returns data is list of dict containing the attributes as in
            output of command 'docker node ls'
        """
        nodes_list = []

        nodes_inspect = self.get_all_nodes_inspect()
        if nodes_inspect is None:
            return None

        if output == 'short':
            for node in nodes_inspect:
                nodes_list.append(node['Description']['Hostname'])
        elif output == 'long':
            for node in nodes_inspect:
                node_property = {}

                node_property.update({'ID': node['ID']})
                node_property.update({'Hostname': node['Description']['Hostname']})
                node_property.update({'Status': node['Status']['State']})
                node_property.update({'Availability': node['Spec']['Availability']})
                if 'ManagerStatus' in node:
                    if node['ManagerStatus']['Leader'] is True:
                        node_property.update({'Leader': True})
                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})

                nodes_list.append(node_property)
        else:
            # Unknown output format requested.
            return None

        return nodes_list

    def get_node_name_by_id(self, nodeid):
        # Convenience lookup: resolve a node ID to its hostname.
        return self.get_node_inspect(nodeid)['Description']['Hostname']

    def get_unlock_key(self):
        # The unlock-key API is only available from Docker SDK 2.7.0 onwards.
        if self.docker_py_version < LooseVersion('2.7.0'):
            return None
        return super(AnsibleDockerSwarmClient, self).get_unlock_key()

    def get_service_inspect(self, service_id, skip_missing=False):
        """
        Returns Swarm service info as in 'docker service inspect' command about single service

        :param service_id: service ID or name
        :param skip_missing: if True then function will return None instead of failing the task
        :return:
            Single service information structure
        """
        try:
            service_info = self.inspect_service(service_id)
        except NotFound as exc:
            if skip_missing is False:
                self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
            else:
                return None
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
            self.fail("Error inspecting swarm service: %s" % exc)
        except Exception as exc:
            self.fail("Error inspecting swarm service: %s" % exc)

        # Normalize the SDK response into plain JSON-compatible structures.
        json_str = json.dumps(service_info, ensure_ascii=False)
        service_info = json.loads(json_str)
        return service_info
|
||||
1156
plugins/modules/docker_compose.py
Normal file
1156
plugins/modules/docker_compose.py
Normal file
File diff suppressed because it is too large
Load Diff
304
plugins/modules/docker_config.py
Normal file
304
plugins/modules/docker_config.py
Normal file
@ -0,0 +1,304 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_config
|
||||
|
||||
short_description: Manage docker configs.
|
||||
|
||||
|
||||
description:
|
||||
- Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
|
||||
- Adds to the metadata of new configs 'ansible_key', an encrypted hash representation of the data, which is then used
|
||||
in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
|
||||
unless the I(force) option is set.
|
||||
- Updates to configs are performed by removing the config and creating it again.
|
||||
options:
|
||||
data:
|
||||
description:
|
||||
- The value of the config. Required when state is C(present).
|
||||
type: str
|
||||
data_is_b64:
|
||||
description:
|
||||
- If set to C(true), the data is assumed to be Base64 encoded and will be
|
||||
decoded before being used.
|
||||
- To use binary I(data), it is better to keep it Base64 encoded and let it
|
||||
be decoded by this option.
|
||||
type: bool
|
||||
default: no
|
||||
labels:
|
||||
description:
|
||||
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
|
||||
- If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
|
||||
type: dict
|
||||
force:
|
||||
description:
|
||||
- Use with state C(present) to always remove and recreate an existing config.
|
||||
- If C(true), an existing config will be replaced, even if it has not been changed.
|
||||
type: bool
|
||||
default: no
|
||||
name:
|
||||
description:
|
||||
- The name of the config.
|
||||
type: str
|
||||
required: yes
|
||||
state:
|
||||
description:
|
||||
- Set to C(present), if the config should exist, and C(absent), if it should not.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
|
||||
- "Docker API >= 1.30"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
- John Hu (@ushuz)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Create config foo (from a file on the control machine)
|
||||
docker_config:
|
||||
name: foo
|
||||
# If the file is JSON or binary, Ansible might modify it (because
|
||||
# it is first decoded and later re-encoded). Base64-encoding the
|
||||
# file directly after reading it prevents this to happen.
|
||||
data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
|
||||
data_is_b64: true
|
||||
state: present
|
||||
|
||||
- name: Change the config data
|
||||
docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Add a new label
|
||||
docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Adding a new label will cause a remove/create of the config
|
||||
two: '2'
|
||||
state: present
|
||||
|
||||
- name: No change
|
||||
docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Even though 'two' is missing, there is no change to the existing config
|
||||
state: present
|
||||
|
||||
- name: Update an existing label
|
||||
docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: monkey # Changing a label will cause a remove/create of the config
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Force the (re-)creation of the config
|
||||
docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
force: yes
|
||||
state: present
|
||||
|
||||
- name: Remove config foo
|
||||
docker_config:
|
||||
name: foo
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
config_id:
|
||||
description:
|
||||
- The ID assigned by Docker to the config object.
|
||||
returned: success and I(state) is C(present)
|
||||
type: str
|
||||
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
|
||||
'''
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
compare_generic,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils._text import to_native, to_bytes
|
||||
|
||||
|
||||
class ConfigManager(DockerBaseClass):
|
||||
|
||||
def __init__(self, client, results):
    """Cache client handles and normalize the module parameters.

    The config payload is decoded to bytes here so the rest of the manager
    only ever deals with raw bytes.
    """
    super(ConfigManager, self).__init__()

    self.client = client
    self.results = results
    self.check_mode = self.client.check_mode

    params = self.client.module.params
    self.name = params.get('name')
    self.state = params.get('state')
    self.labels = params.get('labels')
    self.force = params.get('force')
    self.data_key = None  # sha224 of the data; filled in for state 'present'

    raw_data = params.get('data')
    if raw_data is None:
        self.data = None
    elif params.get('data_is_b64'):
        # Payload was supplied Base64-encoded (binary-safe transport).
        self.data = base64.b64decode(raw_data)
    else:
        self.data = to_bytes(raw_data)
|
||||
|
||||
def __call__(self):
    """Dispatch to the handler matching the requested state."""
    if self.state == 'absent':
        self.absent()
    elif self.state == 'present':
        # Hash the payload up front; present() compares it with the
        # 'ansible_key' label of any existing config.
        self.data_key = hashlib.sha224(self.data).hexdigest()
        self.present()
|
||||
|
||||
def get_config(self):
|
||||
''' Find an existing config. '''
|
||||
try:
|
||||
configs = self.client.configs(filters={'name': self.name})
|
||||
except APIError as exc:
|
||||
self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
|
||||
|
||||
for config in configs:
|
||||
if config['Spec']['Name'] == self.name:
|
||||
return config
|
||||
return None
|
||||
|
||||
def create_config(self):
|
||||
''' Create a new config '''
|
||||
config_id = None
|
||||
# We can't see the data after creation, so adding a label we can use for idempotency check
|
||||
labels = {
|
||||
'ansible_key': self.data_key
|
||||
}
|
||||
if self.labels:
|
||||
labels.update(self.labels)
|
||||
|
||||
try:
|
||||
if not self.check_mode:
|
||||
config_id = self.client.create_config(self.name, self.data, labels=labels)
|
||||
except APIError as exc:
|
||||
self.client.fail("Error creating config: %s" % to_native(exc))
|
||||
|
||||
if isinstance(config_id, dict):
|
||||
config_id = config_id['ID']
|
||||
|
||||
return config_id
|
||||
|
||||
def present(self):
|
||||
''' Handles state == 'present', creating or updating the config '''
|
||||
config = self.get_config()
|
||||
if config:
|
||||
self.results['config_id'] = config['ID']
|
||||
data_changed = False
|
||||
attrs = config.get('Spec', {})
|
||||
if attrs.get('Labels', {}).get('ansible_key'):
|
||||
if attrs['Labels']['ansible_key'] != self.data_key:
|
||||
data_changed = True
|
||||
labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
|
||||
if data_changed or labels_changed or self.force:
|
||||
# if something changed or force, delete and re-create the config
|
||||
self.absent()
|
||||
config_id = self.create_config()
|
||||
self.results['changed'] = True
|
||||
self.results['config_id'] = config_id
|
||||
else:
|
||||
self.results['changed'] = True
|
||||
self.results['config_id'] = self.create_config()
|
||||
|
||||
def absent(self):
|
||||
''' Handles state == 'absent', removing the config '''
|
||||
config = self.get_config()
|
||||
if config:
|
||||
try:
|
||||
if not self.check_mode:
|
||||
self.client.remove_config(config['ID'])
|
||||
except APIError as exc:
|
||||
self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
|
||||
self.results['changed'] = True
|
||||
|
||||
|
||||
def main():
    '''Module entry point: parse arguments and apply the requested config state.'''
    argument_spec = {
        'name': dict(type='str', required=True),
        'state': dict(type='str', default='present', choices=['absent', 'present']),
        'data': dict(type='str'),
        'data_is_b64': dict(type='bool', default=False),
        'labels': dict(type='dict'),
        'force': dict(type='bool', default=False),
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Config data is mandatory whenever the config should exist.
        required_if=[('state', 'present', ['data'])],
        min_docker_version='2.6.0',
        min_docker_api_version='1.30',
    )

    try:
        results = {'changed': False}
        ConfigManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
|
||||
3460
plugins/modules/docker_container.py
Normal file
3460
plugins/modules/docker_container.py
Normal file
File diff suppressed because it is too large
Load Diff
150
plugins/modules/docker_container_info.py
Normal file
150
plugins/modules/docker_container_info.py
Normal file
@ -0,0 +1,150 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_container_info
|
||||
|
||||
short_description: Retrieves facts about docker container
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker container.
|
||||
- Essentially returns the output of C(docker inspect <name>), similar to what M(docker_container)
|
||||
returns for a non-absent container.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the container to inspect.
|
||||
- When identifying an existing container name may be a name or a long or short container ID.
|
||||
type: str
|
||||
required: yes
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- "Felix Fontein (@felixfontein)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.20"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get infos on container
|
||||
docker_container_info:
|
||||
name: mydata
|
||||
register: result
|
||||
|
||||
- name: Does container exist?
|
||||
debug:
|
||||
msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
|
||||
|
||||
- name: Print information about container
|
||||
debug:
|
||||
var: result.container
|
||||
when: result.exists
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the container exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
container:
|
||||
description:
|
||||
- Facts representing the current state of the container. Matches the docker inspection output.
|
||||
- Will be C(none) if container does not exist.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: '{
|
||||
"AppArmorProfile": "",
|
||||
"Args": [],
|
||||
"Config": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": [
|
||||
"/usr/bin/supervisord"
|
||||
],
|
||||
"Domainname": "",
|
||||
"Entrypoint": null,
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"ExposedPorts": {
|
||||
"443/tcp": {},
|
||||
"80/tcp": {}
|
||||
},
|
||||
"Hostname": "8e47bf643eb9",
|
||||
"Image": "lnmp_nginx:v1",
|
||||
"Labels": {},
|
||||
"OnBuild": null,
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {
|
||||
"/tmp/lnmp/nginx-sites/logs/": {}
|
||||
},
|
||||
...
|
||||
}'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
def main():
    '''Module entry point: look up one container and return its inspect data.'''
    client = AnsibleDockerClient(
        argument_spec={'name': dict(type='str', required=True)},
        supports_check_mode=True,
        min_docker_api_version='1.20',
    )

    try:
        # get_container accepts a name or a (short or long) container ID,
        # and returns None when nothing matches.
        container = client.get_container(client.module.params['name'])
        client.module.exit_json(
            changed=False,
            exists=bool(container),
            container=container,
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
|
||||
348
plugins/modules/docker_host_info.py
Normal file
348
plugins/modules/docker_host_info.py
Normal file
@ -0,0 +1,348 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_host_info
|
||||
|
||||
short_description: Retrieves facts about docker host and lists of objects of the services.
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker host.
|
||||
- Essentially returns the output of C(docker system info).
|
||||
- The module also allows to list object names for containers, images, networks and volumes.
|
||||
It also allows to query information on disk usage.
|
||||
- The output differs depending on API version of the docker daemon.
|
||||
- If the docker daemon cannot be contacted or does not meet the API version requirements,
|
||||
the module will fail.
|
||||
|
||||
|
||||
options:
|
||||
containers:
|
||||
description:
|
||||
- Whether to list containers.
|
||||
type: bool
|
||||
default: no
|
||||
containers_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting containers to delete.
|
||||
- "For example, C(until: 24h)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
images:
|
||||
description:
|
||||
- Whether to list images.
|
||||
type: bool
|
||||
default: no
|
||||
images_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting images to delete.
|
||||
- "For example, C(dangling: true)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
networks:
|
||||
description:
|
||||
- Whether to list networks.
|
||||
type: bool
|
||||
default: no
|
||||
networks_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting networks to delete.
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
volumes:
|
||||
description:
|
||||
- Whether to list volumes.
|
||||
type: bool
|
||||
default: no
|
||||
volumes_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting volumes to delete.
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
disk_usage:
|
||||
description:
|
||||
- Summary information on used disk space by all Docker layers.
|
||||
- The output is a sum of images, volumes, containers and build cache.
|
||||
type: bool
|
||||
default: no
|
||||
verbose_output:
|
||||
description:
|
||||
- When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes)
|
||||
then output will contain verbose information about objects matching the full output of API method.
|
||||
For details see the documentation of your version of Docker API at L(https://docs.docker.com/engine/api/).
|
||||
- The verbose output in this module contains only subset of information returned by I(_info) module
|
||||
for each type of the objects.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.21"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info on docker host
|
||||
docker_host_info:
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and list images
|
||||
docker_host_info:
|
||||
images: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and list images matching the filter
|
||||
docker_host_info:
|
||||
images: yes
|
||||
images_filters:
|
||||
label: "mylabel"
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and verbose list images
|
||||
docker_host_info:
|
||||
images: yes
|
||||
verbose_output: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and used disk space
|
||||
docker_host_info:
|
||||
disk_usage: yes
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
var: result.host_info
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
can_talk_to_docker:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
|
||||
host_info:
|
||||
description:
|
||||
- Facts representing the basic state of the docker host. Matches the C(docker system info) output.
|
||||
returned: always
|
||||
type: dict
|
||||
volumes:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each volume.
|
||||
Keys matches the C(docker volume ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(volumes) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
networks:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each network.
|
||||
Keys matches the C(docker network ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(networks) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
containers:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each container.
|
||||
Keys matches the C(docker container ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(containers) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
images:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each image.
|
||||
Keys matches the C(docker image ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(images) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
disk_usage:
|
||||
description:
|
||||
- Information on summary disk usage by images, containers and volumes on docker host
|
||||
unless I(verbose_output=yes). See description for I(verbose_output).
|
||||
returned: When I(disk_usage) is C(yes)
|
||||
type: dict
|
||||
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# Missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import clean_dict_booleans_for_docker_api
|
||||
|
||||
|
||||
class DockerHostManager(DockerBaseClass):
    '''Collects daemon-level facts and optional object listings into results.'''

    def __init__(self, client, results):
        super(DockerHostManager, self).__init__()

        self.client = client
        self.results = results
        self.verbose_output = self.client.module.params['verbose_output']

        self.results['host_info'] = self.get_docker_host_info()

        if self.client.module.params['disk_usage']:
            self.results['disk_usage'] = self.get_docker_disk_usage_facts()

        # Each object type is listed only when its boolean option is enabled;
        # its filters come from the matching '<type>_filters' option.
        for kind in ('volumes', 'networks', 'containers', 'images'):
            if not self.client.module.params[kind]:
                continue
            raw_filters = client.module.params.get(kind + "_filters")
            filters = clean_dict_booleans_for_docker_api(raw_filters)
            self.results[kind] = self.get_docker_items_list(kind, filters)

    def get_docker_host_info(self):
        '''Return the daemon information (the `docker system info` payload).'''
        try:
            return self.client.info()
        except APIError as exc:
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))

    def get_docker_disk_usage_facts(self):
        '''Return disk usage data, trimmed to LayersSize unless verbose.'''
        try:
            usage = self.client.df()
            if self.verbose_output:
                return usage
            return dict(LayersSize=usage['LayersSize'])
        except APIError as exc:
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))

    def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
        '''List objects of one type, condensed to a few keys unless verbose output is on.'''
        # Keys kept in the condensed (non-verbose) record for each object type.
        condensed_keys = {
            'containers': ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names'],
            'volumes': ['Driver', 'Name'],
            'images': ['Id', 'RepoTags', 'Created', 'Size'],
            'networks': ['Id', 'Driver', 'Name', 'Scope'],
        }
        # Client call used to fetch each object type.
        fetchers = {
            'containers': self.client.containers,
            'networks': self.client.networks,
            'images': self.client.images,
            'volumes': self.client.volumes,
        }

        filter_arg = {'filters': filters} if filters else {}
        items = None
        try:
            fetch = fetchers.get(docker_object)
            if fetch is not None:
                items = fetch(**filter_arg)
        except APIError as exc:
            self.client.fail("Error inspecting docker host for object '%s': %s" %
                             (docker_object, to_native(exc)))

        if self.verbose_output:
            # The volumes endpoint wraps its list in a 'Volumes' key.
            if docker_object != 'volumes':
                return items
            return items['Volumes']

        if docker_object == 'volumes':
            items = items['Volumes']

        keys = condensed_keys.get(docker_object, [])
        return [{key: item.get(key) for key in keys} for item in items]
|
||||
|
||||
|
||||
def main():
    '''Module entry point: gather docker host facts and requested object lists.

    Builds the argument spec, creates the Docker client with per-option
    minimum SDK versions, runs DockerHostManager to populate the results,
    and exits with those results (or a structured failure).
    '''
    argument_spec = dict(
        containers=dict(type='bool', default=False),
        containers_filters=dict(type='dict'),
        images=dict(type='bool', default=False),
        images_filters=dict(type='dict'),
        networks=dict(type='bool', default=False),
        networks_filters=dict(type='dict'),
        volumes=dict(type='bool', default=False),
        volumes_filters=dict(type='dict'),
        disk_usage=dict(type='bool', default=False),
        verbose_output=dict(type='bool', default=False),
    )

    option_minimal_versions = dict(
        # BUGFIX: this key was previously 'network_filters', which matches no
        # option in argument_spec ('networks_filters' is the real option name),
        # so the SDK version requirement for network filters was never enforced.
        networks_filters=dict(docker_py_version='2.0.2'),
        disk_usage=dict(docker_py_version='2.2.0'),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='1.10.0',
        min_docker_api_version='1.21',
        option_minimal_versions=option_minimal_versions,
        # Default to "cannot reach the daemon"; flipped to True below once the
        # client was constructed, so failures still report daemon reachability.
        fail_results=dict(
            can_talk_to_docker=False,
        ),
    )
    client.fail_results['can_talk_to_docker'] = True

    try:
        results = dict(
            changed=False,
        )

        # The manager populates `results` in place as a side effect.
        DockerHostManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
|
||||
953
plugins/modules/docker_image.py
Normal file
953
plugins/modules/docker_image.py
Normal file
@ -0,0 +1,953 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_image
|
||||
|
||||
short_description: Manage docker images.
|
||||
|
||||
|
||||
description:
|
||||
- Build, load or pull an image, making the image available for creating containers. Also supports tagging an
|
||||
image into a repository and archiving an image to a .tar file.
|
||||
- Since Ansible 2.8, it is recommended to explicitly specify the image's source (I(source) can be C(build),
|
||||
C(load), C(pull) or C(local)). This will be required from Ansible 2.12 on.
|
||||
|
||||
options:
|
||||
source:
|
||||
description:
|
||||
- "Determines where the module will try to retrieve the image from."
|
||||
- "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
|
||||
be specified when this value is used."
|
||||
- "Use C(load) to load the image from a C(.tar) file. I(load_path) must
|
||||
be specified when this value is used."
|
||||
- "Use C(pull) to pull the image from a registry."
|
||||
- "Use C(local) to make sure that the image is already available on the local
|
||||
docker daemon, i.e. do not try to build, pull or load the image."
|
||||
- "Before Ansible 2.12, the value of this option will be auto-detected
|
||||
to be backwards compatible, but a warning will be issued if it is not
|
||||
explicitly specified. From Ansible 2.12 on, auto-detection will be disabled
|
||||
and this option will be made mandatory."
|
||||
type: str
|
||||
choices:
|
||||
- build
|
||||
- load
|
||||
- pull
|
||||
- local
|
||||
build:
|
||||
description:
|
||||
- "Specifies options used for building images."
|
||||
type: dict
|
||||
suboptions:
|
||||
cache_from:
|
||||
description:
|
||||
- List of image names to consider as cache source.
|
||||
type: list
|
||||
elements: str
|
||||
dockerfile:
|
||||
description:
|
||||
- Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
|
||||
- This can also include a relative path (relative to I(path)).
|
||||
type: str
|
||||
http_timeout:
|
||||
description:
|
||||
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
|
||||
seconds.
|
||||
type: int
|
||||
path:
|
||||
description:
|
||||
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
|
||||
Dockerfile for building an image.
|
||||
type: path
|
||||
required: yes
|
||||
pull:
|
||||
description:
|
||||
- When building an image downloads any updates to the FROM image in Dockerfile.
|
||||
- The default is currently C(yes). This will change to C(no) in Ansible 2.12.
|
||||
type: bool
|
||||
rm:
|
||||
description:
|
||||
- Remove intermediate containers after build.
|
||||
type: bool
|
||||
default: yes
|
||||
network:
|
||||
description:
|
||||
- The network to use for C(RUN) build instructions.
|
||||
type: str
|
||||
nocache:
|
||||
description:
|
||||
- Do not use cache when building an image.
|
||||
type: bool
|
||||
default: no
|
||||
etc_hosts:
|
||||
description:
|
||||
- Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
|
||||
type: dict
|
||||
args:
|
||||
description:
|
||||
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
|
||||
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
|
||||
- Requires Docker API >= 1.21.
|
||||
type: dict
|
||||
container_limits:
|
||||
description:
|
||||
- A dictionary of limits applied to each container created by the build process.
|
||||
type: dict
|
||||
suboptions:
|
||||
memory:
|
||||
description:
|
||||
- Set memory limit for build.
|
||||
type: int
|
||||
memswap:
|
||||
description:
|
||||
- Total memory (memory + swap), -1 to disable swap.
|
||||
type: int
|
||||
cpushares:
|
||||
description:
|
||||
- CPU shares (relative weight).
|
||||
type: int
|
||||
cpusetcpus:
|
||||
description:
|
||||
- CPUs in which to allow execution, e.g., "0-3", "0,1".
|
||||
type: str
|
||||
use_config_proxy:
|
||||
description:
|
||||
- If set to C(yes) and a proxy configuration is specified in the docker client configuration
|
||||
(by default C($HOME/.docker/config.json)), the corresponding environment variables will
|
||||
be set in the container being built.
|
||||
- Needs Docker SDK for Python >= 3.7.0.
|
||||
type: bool
|
||||
target:
|
||||
description:
|
||||
- When building an image specifies an intermediate build stage by
|
||||
name as a final stage for the resulting image.
|
||||
type: str
|
||||
archive_path:
|
||||
description:
|
||||
- Use with state C(present) to archive an image to a .tar file.
|
||||
type: path
|
||||
load_path:
|
||||
description:
|
||||
- Use with state C(present) to load an image from a .tar file.
|
||||
- Set I(source) to C(load) if you want to load the image. The option will
|
||||
be set automatically before Ansible 2.12 if this option is used (except
|
||||
if I(path) is specified as well, in which case building will take precedence).
|
||||
From Ansible 2.12 on, you have to set I(source) to C(load).
|
||||
type: path
|
||||
dockerfile:
|
||||
description:
|
||||
- Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
|
||||
- This can also include a relative path (relative to I(path)).
|
||||
- Please use I(build.dockerfile) instead. This option will be removed in Ansible 2.12.
|
||||
type: str
|
||||
force:
|
||||
description:
|
||||
- Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state
|
||||
C(present) to build, load or pull an image when the image already exists. Also use with state C(present)
|
||||
to force tagging an image.
|
||||
- Please stop using this option, and use the more specialized force options
|
||||
I(force_source), I(force_absent) and I(force_tag) instead.
|
||||
- This option will be removed in Ansible 2.12.
|
||||
type: bool
|
||||
force_source:
|
||||
description:
|
||||
- Use with state C(present) to build, load or pull an image (depending on the
|
||||
value of the I(source) option) when the image already exists.
|
||||
type: bool
|
||||
default: false
|
||||
force_absent:
|
||||
description:
|
||||
- Use with state I(absent) to un-tag and remove all images matching the specified name.
|
||||
type: bool
|
||||
default: false
|
||||
force_tag:
|
||||
description:
|
||||
- Use with state C(present) to force tagging an image.
|
||||
type: bool
|
||||
default: false
|
||||
http_timeout:
|
||||
description:
|
||||
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
|
||||
seconds.
|
||||
- Please use I(build.http_timeout) instead. This option will be removed in Ansible 2.12.
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
|
||||
When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
|
||||
- Note that image IDs (hashes) are not supported.
|
||||
type: str
|
||||
required: yes
|
||||
path:
|
||||
description:
|
||||
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
|
||||
Dockerfile for building an image.
|
||||
- Set I(source) to C(build) if you want to build the image. The option will
|
||||
be set automatically before Ansible 2.12 if this option is used. From Ansible 2.12
|
||||
on, you have to set I(source) to C(build).
|
||||
- Please use I(build.path) instead. This option will be removed in Ansible 2.12.
|
||||
type: path
|
||||
aliases:
|
||||
- build_path
|
||||
pull:
|
||||
description:
|
||||
- When building an image downloads any updates to the FROM image in Dockerfile.
|
||||
- Please use I(build.pull) instead. This option will be removed in Ansible 2.12.
|
||||
- The default is currently C(yes). This will change to C(no) in Ansible 2.12.
|
||||
type: bool
|
||||
push:
|
||||
description:
|
||||
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
|
||||
type: bool
|
||||
default: no
|
||||
rm:
|
||||
description:
|
||||
- Remove intermediate containers after build.
|
||||
- Please use I(build.rm) instead. This option will be removed in Ansible 2.12.
|
||||
type: bool
|
||||
default: yes
|
||||
nocache:
|
||||
description:
|
||||
- Do not use cache when building an image.
|
||||
- Please use I(build.nocache) instead. This option will be removed in Ansible 2.12.
|
||||
type: bool
|
||||
default: no
|
||||
repository:
|
||||
description:
|
||||
- Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
|
||||
format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Make assertions about the state of an image.
|
||||
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
|
||||
matching the provided name.
|
||||
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
|
||||
force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
|
||||
- By default the image will be pulled from Docker Hub, or the registry specified in the image's name. Note that
|
||||
this will change in Ansible 2.12, so to make sure that you are pulling, set I(source) to C(pull). To build
|
||||
the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source)
|
||||
to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to
|
||||
a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed.
|
||||
- "*Note:* C(state=build) is DEPRECATED and will be removed in Ansible 2.11. Specifying C(build) will behave the
|
||||
same as C(present)."
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
- build
|
||||
tag:
|
||||
description:
|
||||
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
|
||||
I(latest).
|
||||
- If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
|
||||
type: str
|
||||
default: latest
|
||||
buildargs:
|
||||
description:
|
||||
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
|
||||
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
|
||||
- Requires Docker API >= 1.21.
|
||||
- Please use I(build.args) instead. This option will be removed in Ansible 2.12.
|
||||
type: dict
|
||||
container_limits:
|
||||
description:
|
||||
- A dictionary of limits applied to each container created by the build process.
|
||||
- Please use I(build.container_limits) instead. This option will be removed in Ansible 2.12.
|
||||
type: dict
|
||||
suboptions:
|
||||
memory:
|
||||
description:
|
||||
- Set memory limit for build.
|
||||
type: int
|
||||
memswap:
|
||||
description:
|
||||
- Total memory (memory + swap), -1 to disable swap.
|
||||
type: int
|
||||
cpushares:
|
||||
description:
|
||||
- CPU shares (relative weight).
|
||||
type: int
|
||||
cpusetcpus:
|
||||
description:
|
||||
- CPUs in which to allow execution, e.g., "0-3", "0,1".
|
||||
type: str
|
||||
use_tls:
|
||||
description:
|
||||
- "DEPRECATED. Whether to use tls to connect to the docker daemon. Set to
|
||||
C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that
|
||||
the server's certificate is valid for the server."
|
||||
- "*Note:* If you specify this option, it will set the value of the I(tls) or
|
||||
I(validate_certs) parameters if not set to C(no)."
|
||||
- Will be removed in Ansible 2.11.
|
||||
type: str
|
||||
choices:
|
||||
- 'no'
|
||||
- 'encrypt'
|
||||
- 'verify'
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.20"
|
||||
|
||||
author:
|
||||
- Pavel Antonov (@softzilla)
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
- Sorin Sbarnea (@ssbarnea)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: pull an image
|
||||
docker_image:
|
||||
name: pacur/centos-7
|
||||
source: pull
|
||||
|
||||
- name: Tag and push to docker hub
|
||||
docker_image:
|
||||
name: pacur/centos-7:56
|
||||
repository: dcoppenhagan/myimage:7.56
|
||||
push: yes
|
||||
source: local
|
||||
|
||||
- name: Tag and push to local registry
|
||||
docker_image:
|
||||
# Image will be centos:7
|
||||
name: centos
|
||||
# Will be pushed to localhost:5000/centos:7
|
||||
repository: localhost:5000/centos
|
||||
tag: 7
|
||||
push: yes
|
||||
source: local
|
||||
|
||||
- name: Add tag latest to image
|
||||
docker_image:
|
||||
name: myimage:7.1.2
|
||||
repository: myimage:latest
|
||||
# As 'latest' usually already is present, we need to enable overwriting of existing tags:
|
||||
force_tag: yes
|
||||
source: local
|
||||
|
||||
- name: Remove image
|
||||
docker_image:
|
||||
state: absent
|
||||
name: registry.ansible.com/chouseknecht/sinatra
|
||||
tag: v1
|
||||
|
||||
- name: Build an image and push it to a private repo
|
||||
docker_image:
|
||||
build:
|
||||
path: ./sinatra
|
||||
name: registry.ansible.com/chouseknecht/sinatra
|
||||
tag: v1
|
||||
push: yes
|
||||
source: build
|
||||
|
||||
- name: Archive image
|
||||
docker_image:
|
||||
name: registry.ansible.com/chouseknecht/sinatra
|
||||
tag: v1
|
||||
archive_path: my_sinatra.tar
|
||||
source: local
|
||||
|
||||
- name: Load image from archive and push to a private registry
|
||||
docker_image:
|
||||
name: localhost:5000/myimages/sinatra
|
||||
tag: v1
|
||||
push: yes
|
||||
load_path: my_sinatra.tar
|
||||
source: load
|
||||
|
||||
- name: Build image with build args
|
||||
docker_image:
|
||||
name: myimage
|
||||
build:
|
||||
path: /path/to/build/dir
|
||||
args:
|
||||
log_volume: /var/log/myapp
|
||||
listen_port: 8080
|
||||
source: build
|
||||
|
||||
- name: Build image using cache source
|
||||
docker_image:
|
||||
name: myimage:latest
|
||||
build:
|
||||
path: /path/to/build/dir
|
||||
# Use as cache source for building myimage
|
||||
cache_from:
|
||||
- nginx:latest
|
||||
- alpine:3.8
|
||||
source: build
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
image:
|
||||
description: Image inspection results for the affected image.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
'''
|
||||
|
||||
import errno
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
clean_dict_booleans_for_docker_api,
|
||||
docker_version,
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
is_valid_tag,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
if docker_version is not None:
|
||||
try:
|
||||
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
|
||||
from docker.auth import resolve_repository_name
|
||||
else:
|
||||
from docker.auth.auth import resolve_repository_name
|
||||
from docker.utils.utils import parse_repository_tag
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
class ImageManager(DockerBaseClass):
    '''Drives the docker_image module: builds, loads, pulls, archives, tags,
    pushes or removes an image according to the module parameters.

    Note: the constructor performs all the work immediately (it dispatches to
    present()/absent()), so instantiating this class has side effects.
    '''

    def __init__(self, client, results):
        '''
        :param client: AnsibleDockerClient connected to the Docker daemon.
        :param results: dict accumulating 'changed', 'actions' and 'image';
            main() later passes it to exit_json().
        '''

        super(ImageManager, self).__init__()

        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.source = parameters['source']
        # build.* suboptions; main() merged the deprecated top-level aliases
        # (path, dockerfile, nocache, ...) into 'build' before we get here.
        build = parameters['build'] or dict()
        self.archive_path = parameters.get('archive_path')
        self.cache_from = build.get('cache_from')
        self.container_limits = build.get('container_limits')
        self.dockerfile = build.get('dockerfile')
        self.force_source = parameters.get('force_source')
        self.force_absent = parameters.get('force_absent')
        self.force_tag = parameters.get('force_tag')
        self.load_path = parameters.get('load_path')
        self.name = parameters.get('name')
        self.network = build.get('network')
        # Docker expects string values in extra_hosts; normalize booleans etc.
        self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
        self.nocache = build.get('nocache', False)
        self.build_path = build.get('path')
        self.pull = build.get('pull')
        self.target = build.get('target')
        self.repository = parameters.get('repository')
        self.rm = build.get('rm', True)
        self.state = parameters.get('state')
        self.tag = parameters.get('tag')
        self.http_timeout = build.get('http_timeout')
        self.push = parameters.get('push')
        self.buildargs = build.get('args')
        self.use_config_proxy = build.get('use_config_proxy')

        # If name contains a tag, it takes precedence over tag parameter.
        if not is_image_name_id(self.name):
            repo, repo_tag = parse_repository_tag(self.name)
            if repo_tag:
                self.name = repo
                self.tag = repo_tag

        # Dispatch immediately; the deprecated 'build' state was already
        # normalized to 'present' by main().
        if self.state == 'present':
            self.present()
        elif self.state == 'absent':
            self.absent()
|
||||
def fail(self, msg):
|
||||
self.client.fail(msg)
|
||||
|
||||
def present(self):
|
||||
'''
|
||||
Handles state = 'present', which includes building, loading or pulling an image,
|
||||
depending on user provided parameters.
|
||||
|
||||
:returns None
|
||||
'''
|
||||
image = self.client.find_image(name=self.name, tag=self.tag)
|
||||
|
||||
if not image or self.force_source:
|
||||
if self.source == 'build':
|
||||
# Build the image
|
||||
if not os.path.isdir(self.build_path):
|
||||
self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
|
||||
image_name = self.name
|
||||
if self.tag:
|
||||
image_name = "%s:%s" % (self.name, self.tag)
|
||||
self.log("Building image %s" % image_name)
|
||||
self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
|
||||
self.results['changed'] = True
|
||||
if not self.check_mode:
|
||||
self.results['image'] = self.build_image()
|
||||
elif self.source == 'load':
|
||||
# Load the image from an archive
|
||||
if not os.path.isfile(self.load_path):
|
||||
self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
|
||||
self.load_path))
|
||||
image_name = self.name
|
||||
if self.tag:
|
||||
image_name = "%s:%s" % (self.name, self.tag)
|
||||
self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
|
||||
self.results['changed'] = True
|
||||
if not self.check_mode:
|
||||
self.results['image'] = self.load_image()
|
||||
elif self.source == 'pull':
|
||||
# pull the image
|
||||
self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
|
||||
self.results['changed'] = True
|
||||
if not self.check_mode:
|
||||
self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
|
||||
elif self.source == 'local':
|
||||
if image is None:
|
||||
name = self.name
|
||||
if self.tag:
|
||||
name = "%s:%s" % (self.name, self.tag)
|
||||
self.client.fail('Cannot find the image %s locally.' % name)
|
||||
if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
|
||||
self.results['changed'] = False
|
||||
|
||||
if self.archive_path:
|
||||
self.archive_image(self.name, self.tag)
|
||||
|
||||
if self.push and not self.repository:
|
||||
self.push_image(self.name, self.tag)
|
||||
elif self.repository:
|
||||
self.tag_image(self.name, self.tag, self.repository, push=self.push)
|
||||
|
||||
def absent(self):
|
||||
'''
|
||||
Handles state = 'absent', which removes an image.
|
||||
|
||||
:return None
|
||||
'''
|
||||
name = self.name
|
||||
if is_image_name_id(name):
|
||||
image = self.client.find_image_by_id(name)
|
||||
else:
|
||||
image = self.client.find_image(name, self.tag)
|
||||
if self.tag:
|
||||
name = "%s:%s" % (self.name, self.tag)
|
||||
if image:
|
||||
if not self.check_mode:
|
||||
try:
|
||||
self.client.remove_image(name, force=self.force_absent)
|
||||
except Exception as exc:
|
||||
self.fail("Error removing image %s - %s" % (name, str(exc)))
|
||||
|
||||
self.results['changed'] = True
|
||||
self.results['actions'].append("Removed image %s" % (name))
|
||||
self.results['image']['state'] = 'Deleted'
|
||||
|
||||
    def archive_image(self, name, tag):
        '''
        Archive an image to a .tar file. Called when archive_path is passed.

        :param name: name of the image. Type: str
        :param tag: image tag; falls back to 'latest' when falsy. Type: str
        :return: None
        '''

        if not tag:
            tag = "latest"

        image = self.client.find_image(name=name, tag=tag)
        if not image:
            # Best-effort: silently skip when the image is not present locally.
            self.log("archive image: image %s:%s not found" % (name, tag))
            return

        image_name = "%s:%s" % (name, tag)
        self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
        # NOTE(review): 'changed' is reported unconditionally here, even when an
        # identical archive already exists at archive_path.
        self.results['changed'] = True
        if not self.check_mode:
            self.log("Getting archive of image %s" % image_name)
            try:
                # get_image() returns the raw tar stream from the daemon.
                image = self.client.get_image(image_name)
            except Exception as exc:
                self.fail("Error getting image %s - %s" % (image_name, str(exc)))

            try:
                with open(self.archive_path, 'wb') as fd:
                    # docker-py >= 3.0 yields chunks directly; older versions
                    # return a response object that must be streamed manually.
                    if self.client.docker_py_version >= LooseVersion('3.0.0'):
                        for chunk in image:
                            fd.write(chunk)
                    else:
                        for chunk in image.stream(2048, decode_content=False):
                            fd.write(chunk)
            except Exception as exc:
                self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))

        # Re-inspect so the returned facts reflect the archived image.
        image = self.client.find_image(name=name, tag=tag)
        if image:
            self.results['image'] = image
||||
|
||||
    def push_image(self, name, tag=None):
        '''
        If the name of the image contains a repository path, then push the image.

        :param name: Name of the image to push.
        :param tag: Use a specific tag.
        :return: None
        '''

        repository = name
        if not tag:
            # No explicit tag: the name itself may carry one (repo:tag).
            repository, tag = parse_repository_tag(name)
        registry, repo_name = resolve_repository_name(repository)

        self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))

        if registry:
            self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
            # Assume changed; refined below from the push status stream.
            self.results['changed'] = True
            if not self.check_mode:
                status = None
                try:
                    changed = False
                    # Stream push progress; with decode=True each line is a dict.
                    for line in self.client.push(repository, tag=tag, stream=True, decode=True):
                        self.log(line, pretty_print=True)
                        if line.get('errorDetail'):
                            raise Exception(line['errorDetail']['message'])
                        status = line.get('status')
                        if status == 'Pushing':
                            # At least one layer was actually uploaded.
                            changed = True
                    self.results['changed'] = changed
                except Exception as exc:
                    # self.fail() raises, so at most one of these messages fires.
                    if re.search('unauthorized', str(exc)):
                        if re.search('authentication required', str(exc)):
                            self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
                                      (registry, repo_name, tag, str(exc), registry))
                        else:
                            self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
                                      (registry, repo_name, tag, str(exc)))
                    self.fail("Error pushing image %s: %s" % (repository, str(exc)))
                self.results['image'] = self.client.find_image(name=repository, tag=tag)
                if not self.results['image']:
                    self.results['image'] = dict()
                # Expose the last status line of the push for callers.
                self.results['image']['push_status'] = status
||||
|
||||
def tag_image(self, name, tag, repository, push=False):
|
||||
'''
|
||||
Tag an image into a repository.
|
||||
|
||||
:param name: name of the image. required.
|
||||
:param tag: image tag.
|
||||
:param repository: path to the repository. required.
|
||||
:param push: bool. push the image once it's tagged.
|
||||
:return: None
|
||||
'''
|
||||
repo, repo_tag = parse_repository_tag(repository)
|
||||
if not repo_tag:
|
||||
repo_tag = "latest"
|
||||
if tag:
|
||||
repo_tag = tag
|
||||
image = self.client.find_image(name=repo, tag=repo_tag)
|
||||
found = 'found' if image else 'not found'
|
||||
self.log("image %s was %s" % (repo, found))
|
||||
|
||||
if not image or self.force_tag:
|
||||
self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
|
||||
self.results['changed'] = True
|
||||
self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
|
||||
if not self.check_mode:
|
||||
try:
|
||||
# Finding the image does not always work, especially running a localhost registry. In those
|
||||
# cases, if we don't set force=True, it errors.
|
||||
image_name = name
|
||||
if tag and not re.search(tag, name):
|
||||
image_name = "%s:%s" % (name, tag)
|
||||
tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
|
||||
if not tag_status:
|
||||
raise Exception("Tag operation failed.")
|
||||
except Exception as exc:
|
||||
self.fail("Error: failed to tag image - %s" % str(exc))
|
||||
self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
|
||||
if image and image['Id'] == self.results['image']['Id']:
|
||||
self.results['changed'] = False
|
||||
|
||||
if push:
|
||||
self.push_image(repo, repo_tag)
|
||||
|
||||
    def build_image(self):
        '''
        Build an image

        :return: image dict
        '''
        # Base parameters accepted by docker-py's build() in all supported versions.
        params = dict(
            path=self.build_path,
            tag=self.name,
            rm=self.rm,
            nocache=self.nocache,
            timeout=self.http_timeout,
            pull=self.pull,
            forcerm=self.rm,
            dockerfile=self.dockerfile,
            decode=True,
        )
        # docker-py < 3.0.0 needs stream=True; 3.x always streams.
        if self.client.docker_py_version < LooseVersion('3.0.0'):
            params['stream'] = True
        build_output = []
        if self.tag:
            params['tag'] = "%s:%s" % (self.name, self.tag)
        if self.container_limits:
            params['container_limits'] = self.container_limits
        if self.buildargs:
            # Docker requires all build args to be strings.
            for key, value in self.buildargs.items():
                self.buildargs[key] = to_native(value)
            params['buildargs'] = self.buildargs
        if self.cache_from:
            params['cache_from'] = self.cache_from
        if self.network:
            params['network_mode'] = self.network
        if self.extra_hosts:
            params['extra_hosts'] = self.extra_hosts
        if self.use_config_proxy:
            params['use_config_proxy'] = self.use_config_proxy
            # Due to a bug in docker-py, it will crash if
            # use_config_proxy is True and buildargs is None
            if 'buildargs' not in params:
                params['buildargs'] = {}
        if self.target:
            params['target'] = self.target

        # Stream the build; collect stdout-like lines so they can be shown
        # in the failure message if the build errors out.
        for line in self.client.build(**params):
            # line = json.loads(line)
            self.log(line, pretty_print=True)
            if "stream" in line:
                build_output.append(line["stream"])
            if line.get('error'):
                if line.get('errorDetail'):
                    errorDetail = line.get('errorDetail')
                    self.fail(
                        "Error building %s - code: %s, message: %s, logs: %s" % (
                            self.name,
                            errorDetail.get('code'),
                            errorDetail.get('message'),
                            build_output))
                else:
                    self.fail("Error building %s - message: %s, logs: %s" % (
                        self.name, line.get('error'), build_output))
        # Return the inspection dict of the freshly built image.
        return self.client.find_image(name=self.name, tag=self.tag)
||||
|
||||
def load_image(self):
|
||||
'''
|
||||
Load an image from a .tar archive
|
||||
|
||||
:return: image dict
|
||||
'''
|
||||
try:
|
||||
self.log("Opening image %s" % self.load_path)
|
||||
with open(self.load_path, 'rb') as image_tar:
|
||||
self.log("Loading image from %s" % self.load_path)
|
||||
self.client.load_image(image_tar)
|
||||
except EnvironmentError as exc:
|
||||
if exc.errno == errno.ENOENT:
|
||||
self.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
|
||||
self.fail("Error loading image %s - %s" % (self.name, str(exc)))
|
||||
except Exception as exc:
|
||||
self.fail("Error loading image %s - %s" % (self.name, str(exc)))
|
||||
|
||||
return self.client.find_image(self.name, self.tag)
|
||||
|
||||
|
||||
def main():
    '''Module entry point: declare the argument spec, normalize the various
    deprecated options/states, then run ImageManager and exit with its results.
    '''
    argument_spec = dict(
        source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
        build=dict(type='dict', options=dict(
            cache_from=dict(type='list', elements='str'),
            container_limits=dict(type='dict', options=dict(
                memory=dict(type='int'),
                memswap=dict(type='int'),
                cpushares=dict(type='int'),
                cpusetcpus=dict(type='str'),
            )),
            dockerfile=dict(type='str'),
            http_timeout=dict(type='int'),
            network=dict(type='str'),
            nocache=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            pull=dict(type='bool'),
            rm=dict(type='bool', default=True),
            args=dict(type='dict'),
            use_config_proxy=dict(type='bool'),
            target=dict(type='str'),
            etc_hosts=dict(type='dict'),
        )),
        archive_path=dict(type='path'),
        # Deprecated top-level aliases of the build.* suboptions (Ansible 2.12).
        container_limits=dict(type='dict', options=dict(
            memory=dict(type='int'),
            memswap=dict(type='int'),
            cpushares=dict(type='int'),
            cpusetcpus=dict(type='str'),
        ), removed_in_version='2.12'),
        dockerfile=dict(type='str', removed_in_version='2.12'),
        force=dict(type='bool', removed_in_version='2.12'),
        force_source=dict(type='bool', default=False),
        force_absent=dict(type='bool', default=False),
        force_tag=dict(type='bool', default=False),
        http_timeout=dict(type='int', removed_in_version='2.12'),
        load_path=dict(type='path'),
        name=dict(type='str', required=True),
        nocache=dict(type='bool', default=False, removed_in_version='2.12'),
        path=dict(type='path', aliases=['build_path'], removed_in_version='2.12'),
        pull=dict(type='bool', removed_in_version='2.12'),
        push=dict(type='bool', default=False),
        repository=dict(type='str'),
        rm=dict(type='bool', default=True, removed_in_version='2.12'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'build']),
        tag=dict(type='str', default='latest'),
        use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.11'),
        buildargs=dict(type='dict', removed_in_version='2.12'),
    )

    required_if = [
        # ('state', 'present', ['source']), -- enable in Ansible 2.12.
        # ('source', 'build', ['build']), -- enable in Ansible 2.12.
        ('source', 'load', ['load_path']),
    ]

    # Usage detectors for option_minimal_versions: each returns truthy when the
    # corresponding build.* suboption was actually supplied by the user.
    def detect_build_cache_from(client):
        return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None

    def detect_build_network(client):
        return client.module.params['build'] and client.module.params['build'].get('network') is not None

    def detect_build_target(client):
        return client.module.params['build'] and client.module.params['build'].get('target') is not None

    def detect_use_config_proxy(client):
        return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None

    def detect_etc_hosts(client):
        return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))

    # Minimum docker-py / Docker API versions per suboption; enforced only when
    # the option is actually used (see the detectors above).
    option_minimal_versions = dict()
    option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
    option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
    option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target)
    option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
    option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts)

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_version='1.8.0',
        min_docker_api_version='1.20',
        option_minimal_versions=option_minimal_versions,
    )

    # Normalize the deprecated 'build' state to 'present'.
    if client.module.params['state'] == 'build':
        client.module.warn('The "build" state has been deprecated for a long time '
                           'and will be removed in Ansible 2.11. Please use '
                           '"present", which has the same meaning as "build".')
        client.module.params['state'] = 'present'
    if client.module.params['use_tls']:
        # Bug fix: the original implicit string concatenation was missing a
        # space between 'the' and '"tls"', producing 'use the"tls"' in the
        # warning text.
        client.module.warn('The "use_tls" option has been deprecated for a long time '
                           'and will be removed in Ansible 2.11. Please use the '
                           '"tls" and "validate_certs" options instead.')

    if not is_valid_tag(client.module.params['tag'], allow_empty=True):
        client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))

    # Migrate the deprecated top-level options into the 'build' dict, failing
    # when both the old and the new spelling were given with non-default values.
    build_options = dict(
        container_limits='container_limits',
        dockerfile='dockerfile',
        http_timeout='http_timeout',
        nocache='nocache',
        path='path',
        pull='pull',
        rm='rm',
        buildargs='args',
    )
    for option, build_option in build_options.items():
        default_value = None
        if option in ('rm', ):
            default_value = True
        elif option in ('nocache', ):
            default_value = False
        if client.module.params[option] != default_value:
            if client.module.params['build'] is None:
                client.module.params['build'] = dict()
            if client.module.params['build'].get(build_option, default_value) != default_value:
                client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
            client.module.params['build'][build_option] = client.module.params[option]
            client.module.warn('Please specify build.%s instead of %s. The %s option '
                               'has been renamed and will be removed in Ansible 2.12.' % (build_option, option, option))
    if client.module.params['source'] == 'build':
        if (not client.module.params['build'] or not client.module.params['build'].get('path')):
            client.fail('If "source" is set to "build", the "build.path" option must be specified.')
        if client.module.params['build'].get('pull') is None:
            client.module.warn("The default for build.pull is currently 'yes', but will be changed to 'no' in Ansible 2.12. "
                               "Please set build.pull explicitly to the value you need.")
            client.module.params['build']['pull'] = True  # TODO: change to False in Ansible 2.12

    if client.module.params['state'] == 'present' and client.module.params['source'] is None:
        # Autodetection. To be removed in Ansible 2.12.
        if (client.module.params['build'] or dict()).get('path'):
            client.module.params['source'] = 'build'
        elif client.module.params['load_path']:
            client.module.params['source'] = 'load'
        else:
            client.module.params['source'] = 'pull'
        client.module.warn('The value of the "source" option was determined to be "%s". '
                           'Please set the "source" option explicitly. Autodetection will '
                           'be removed in Ansible 2.12.' % client.module.params['source'])

    # The deprecated blanket 'force' enables all three specific force options.
    if client.module.params['force']:
        client.module.params['force_source'] = True
        client.module.params['force_absent'] = True
        client.module.params['force_tag'] = True
        client.module.warn('The "force" option will be removed in Ansible 2.12. Please '
                           'use the "force_source", "force_absent" or "force_tag" option '
                           'instead, depending on what you want to force.')

    try:
        results = dict(
            changed=False,
            actions=[],
            image={}
        )

        # The constructor does all the work and mutates 'results' in place.
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
||||
1
plugins/modules/docker_image_facts.py
Symbolic link
1
plugins/modules/docker_image_facts.py
Symbolic link
@ -0,0 +1 @@
|
||||
docker_image_info.py
|
||||
274
plugins/modules/docker_image_info.py
Normal file
274
plugins/modules/docker_image_info.py
Normal file
@ -0,0 +1,274 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_image_info
|
||||
|
||||
short_description: Inspect docker images
|
||||
|
||||
|
||||
description:
|
||||
- Provide one or more image names, and the module will inspect each, returning an array of inspection results.
|
||||
- If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
|
||||
locally, you can call the module with the image name, then check whether the result list is empty (image does not
|
||||
exist) or has one element (the image exists locally).
|
||||
- The module will not attempt to pull images from registries. Use M(docker_image) with I(source) set to C(pull)
|
||||
to ensure an image is pulled.
|
||||
|
||||
notes:
|
||||
- This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
|
||||
where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
|
||||
image IDs can be used.
|
||||
- If no name is provided, a list of all images will be returned.
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.20"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Inspect a single image
|
||||
docker_image_info:
|
||||
name: pacur/centos-7
|
||||
|
||||
- name: Inspect multiple images
|
||||
docker_image_info:
|
||||
name:
|
||||
- pacur/centos-7
|
||||
- sinatra
|
||||
register: result
|
||||
|
||||
- name: Make sure that both images pacur/centos-7 and sinatra exist locally
|
||||
assert:
|
||||
that:
|
||||
- result.images | length == 2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
images:
|
||||
description:
|
||||
- Inspection results for the selected images.
|
||||
- The list only contains inspection results of images existing locally.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
sample: [
|
||||
{
|
||||
"Architecture": "amd64",
|
||||
"Author": "",
|
||||
"Comment": "",
|
||||
"Config": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": [
|
||||
"/etc/docker/registry/config.yml"
|
||||
],
|
||||
"Domainname": "",
|
||||
"Entrypoint": [
|
||||
"/bin/registry"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"ExposedPorts": {
|
||||
"5000/tcp": {}
|
||||
},
|
||||
"Hostname": "e5c68db50333",
|
||||
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
|
||||
"Labels": {},
|
||||
"OnBuild": [],
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {
|
||||
"/var/lib/registry": {}
|
||||
},
|
||||
"WorkingDir": ""
|
||||
},
|
||||
"Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
|
||||
"ContainerConfig": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
'#(nop) CMD ["/etc/docker/registry/config.yml"]'
|
||||
],
|
||||
"Domainname": "",
|
||||
"Entrypoint": [
|
||||
"/bin/registry"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"ExposedPorts": {
|
||||
"5000/tcp": {}
|
||||
},
|
||||
"Hostname": "e5c68db50333",
|
||||
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
|
||||
"Labels": {},
|
||||
"OnBuild": [],
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {
|
||||
"/var/lib/registry": {}
|
||||
},
|
||||
"WorkingDir": ""
|
||||
},
|
||||
"Created": "2016-03-08T21:08:15.399680378Z",
|
||||
"DockerVersion": "1.9.1",
|
||||
"GraphDriver": {
|
||||
"Data": null,
|
||||
"Name": "aufs"
|
||||
},
|
||||
"Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
|
||||
"Name": "registry:2",
|
||||
"Os": "linux",
|
||||
"Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
|
||||
"RepoDigests": [],
|
||||
"RepoTags": [
|
||||
"registry:2"
|
||||
],
|
||||
"Size": 0,
|
||||
"VirtualSize": 165808884
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker import utils
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
class ImageManager(DockerBaseClass):
    '''Collect inspection data for named images, or for all images.

    The constructor does all the work: it reads the ``name`` module
    parameter from *client* and fills ``results['images']``.
    '''

    def __init__(self, client, results):
        super(ImageManager, self).__init__()

        self.client = client
        self.results = results
        self.name = self.client.module.params.get('name')
        self.log("Gathering facts for images: %s" % (str(self.name)))

        # Inspect only the requested images, or every image when none were named.
        if self.name:
            self.results['images'] = self.get_facts()
        else:
            self.results['images'] = self.get_all_images()

    def fail(self, msg):
        # Delegate failure reporting to the Docker client wrapper.
        self.client.fail(msg)

    def get_facts(self):
        '''
        Lookup and inspect each image name found in the names parameter.

        :returns array of image dictionaries
        '''
        names = self.name if isinstance(self.name, list) else [self.name]

        found = []
        for image_name in names:
            if is_image_name_id(image_name):
                # The name is actually an image ID; look it up directly.
                self.log('Fetching image %s (ID)' % (image_name))
                image = self.client.find_image_by_id(image_name)
            else:
                repository, tag = utils.parse_repository_tag(image_name)
                # Docker's convention: an untagged reference means "latest".
                tag = tag or 'latest'
                self.log('Fetching image %s:%s' % (repository, tag))
                image = self.client.find_image(name=repository, tag=tag)
            # Unknown images are silently skipped rather than failing the task.
            if image:
                found.append(image)
        return found

    def get_all_images(self):
        '''Inspect every image known to the daemon and return the inspection dicts.'''
        inspected = []
        for image in self.client.images():
            try:
                details = self.client.inspect_image(image['Id'])
            except Exception as exc:
                self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
            inspected.append(details)
        return inspected
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_image_info module.

    Builds the Docker client from the module arguments, gathers image
    inspection data via ImageManager, and exits with the results.
    """
    # Single option: list of image names/IDs to inspect (all images when omitted).
    argument_spec = dict(
        name=dict(type='list', elements='str'),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_api_version='1.20',
    )
    # Warn users still invoking this module under its pre-rename alias.
    if client.module._name == 'docker_image_facts':
        client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'", version='2.12')

    try:
        # changed is always False: this module only reads state.
        results = dict(
            changed=False,
            images=[]
        )

        # ImageManager fills results['images'] as a side effect of construction.
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
490
plugins/modules/docker_login.py
Normal file
490
plugins/modules/docker_login.py
Normal file
@ -0,0 +1,490 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
|
||||
# Chris Houseknecht, <house@redhat.com>
|
||||
# James Tanner, <jtanner@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_login
|
||||
short_description: Log into a Docker registry.
|
||||
description:
|
||||
- Provides functionality similar to the "docker login" command.
|
||||
- Authenticate with a docker registry and add the credentials to your local Docker config file respectively the
|
||||
credentials store associated to the registry. Adding the credentials to the config files resp. the credential
|
||||
store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
|
||||
and Docker SDK for Python without needing to provide credentials.
|
||||
- Running in check mode will perform the authentication without updating the config file.
|
||||
options:
|
||||
registry_url:
|
||||
description:
|
||||
- The registry URL.
|
||||
type: str
|
||||
default: "https://index.docker.io/v1/"
|
||||
aliases:
|
||||
- registry
|
||||
- url
|
||||
username:
|
||||
description:
|
||||
- The username for the registry account.
|
||||
- Required when I(state) is C(present).
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The plaintext password for the registry account.
|
||||
- Required when I(state) is C(present).
|
||||
type: str
|
||||
email:
|
||||
description:
|
||||
- Does nothing, do not use.
|
||||
- Will be removed in Ansible 2.14.
|
||||
type: str
|
||||
reauthorize:
|
||||
description:
|
||||
- Refresh existing authentication found in the configuration file.
|
||||
type: bool
|
||||
default: no
|
||||
aliases:
|
||||
- reauth
|
||||
config_path:
|
||||
description:
|
||||
- Custom path to the Docker CLI configuration file.
|
||||
type: path
|
||||
default: ~/.docker/config.json
|
||||
aliases:
|
||||
- dockercfg_path
|
||||
state:
|
||||
description:
|
||||
- This controls the current state of the user. C(present) will login in a user, C(absent) will log them out.
|
||||
- To logout you only need the registry server, which defaults to DockerHub.
|
||||
- Before 2.1 you could ONLY log in.
|
||||
- Docker does not support 'logout' with a custom config file.
|
||||
type: str
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "L(Python bindings for docker credentials store API) >= 0.2.1
|
||||
(use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
|
||||
- "Docker API >= 1.20"
|
||||
author:
|
||||
- Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Log into DockerHub
|
||||
docker_login:
|
||||
username: docker
|
||||
password: rekcod
|
||||
|
||||
- name: Log into private registry and force re-authorization
|
||||
docker_login:
|
||||
registry_url: your.private.registry.io
|
||||
username: yourself
|
||||
password: secrets3
|
||||
reauthorize: yes
|
||||
|
||||
- name: Log into DockerHub using a custom config file
|
||||
docker_login:
|
||||
username: docker
|
||||
password: rekcod
|
||||
config_path: /tmp/.mydockercfg
|
||||
|
||||
- name: Log out of DockerHub
|
||||
docker_login:
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
login_results:
|
||||
description: Results from the login.
|
||||
returned: when state='present'
|
||||
type: dict
|
||||
sample: {
|
||||
"serveraddress": "localhost:5000",
|
||||
"username": "testuser"
|
||||
}
|
||||
'''
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
from ansible.module_utils._text import to_bytes, to_text
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
from docker import auth
|
||||
|
||||
# Earlier versions of docker/docker-py put decode_auth
|
||||
# in docker.auth.auth instead of docker.auth
|
||||
if hasattr(auth, 'decode_auth'):
|
||||
from docker.auth import decode_auth
|
||||
else:
|
||||
from docker.auth.auth import decode_auth
|
||||
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
HAS_DOCKER_PY,
|
||||
DEFAULT_DOCKER_REGISTRY,
|
||||
DockerBaseClass,
|
||||
EMAIL_REGEX,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
NEEDS_DOCKER_PYCREDS = False
|
||||
|
||||
# Early versions of docker/docker-py rely on docker-pycreds for
|
||||
# the credential store api.
|
||||
if HAS_DOCKER_PY:
|
||||
try:
|
||||
from docker.credentials.errors import StoreError, CredentialsNotFound
|
||||
from docker.credentials import Store
|
||||
except ImportError:
|
||||
try:
|
||||
from dockerpycreds.errors import StoreError, CredentialsNotFound
|
||||
from dockerpycreds.store import Store
|
||||
except ImportError as exc:
|
||||
HAS_DOCKER_ERROR = str(exc)
|
||||
NEEDS_DOCKER_PYCREDS = True
|
||||
|
||||
|
||||
if NEEDS_DOCKER_PYCREDS:
|
||||
# docker-pycreds missing, so we need to create some place holder classes
|
||||
# to allow instantiation.
|
||||
|
||||
class StoreError(Exception):
|
||||
pass
|
||||
|
||||
class CredentialsNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class DockerFileStore(object):
    '''
    A custom credential store class that implements only the functionality we need to
    update the docker config file when no credential helper is provided.
    '''

    # Reported in log/action messages where a real credential helper would
    # report its program name.
    program = "<legacy config>"

    def __init__(self, config_path):
        '''
        :param config_path: Path of the Docker CLI config file. It does not
            need to exist yet; a minimal config is used in that case.
        '''
        self._config_path = config_path

        # Make sure we have a minimal config if none is available.
        self._config = dict(
            auths=dict()
        )

        try:
            # Attempt to read the existing config.
            with open(self._config_path, "r") as f:
                config = json.load(f)
        except (ValueError, IOError):
            # No config found or an invalid config found so we'll ignore it.
            config = dict()

        # Update our internal config with whatever was loaded.
        self._config.update(config)

    @property
    def config_path(self):
        '''
        Return the config path configured in this DockerFileStore instance.
        '''
        return self._config_path

    def get(self, server):
        '''
        Retrieve credentials for `server` if there are any in the config file.
        Otherwise raise a `CredentialsNotFound` error.
        '''
        server_creds = self._config['auths'].get(server)
        if not server_creds:
            raise CredentialsNotFound('No matching credentials')

        (username, password) = decode_auth(server_creds['auth'])

        return dict(
            Username=username,
            Secret=password
        )

    def _write(self):
        '''
        Write config back out to disk.
        '''
        # Make sure the parent directory exists.
        # (renamed from `dir`, which shadowed the builtin)
        config_dir = os.path.dirname(self._config_path)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        # Write config; make sure it has permissions 0o600 since it may
        # contain (encoded) credentials.
        content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
        f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        try:
            os.write(f, content)
        finally:
            os.close(f)

    def store(self, server, username, password):
        '''
        Add credentials for `server` to the current configuration and persist them.
        '''
        b64auth = base64.b64encode(
            to_bytes(username) + b':' + to_bytes(password)
        )
        # Named `encoded_auth` so the module-level `docker.auth` import is
        # not shadowed.
        encoded_auth = to_text(b64auth)

        # BUGFIX: update only this server's entry. The previous code built a
        # fresh {'auths': {server: ...}} dict and passed it to
        # self._config.update(), which replaced the entire 'auths' mapping and
        # silently erased stored credentials for every other registry.
        self._config['auths'][server] = dict(
            auth=encoded_auth
        )
        self._write()

    def erase(self, server):
        '''
        Remove credentials for the given server from the configuration.

        Raises KeyError if no credentials are stored for `server`.
        '''
        self._config['auths'].pop(server)
        self._write()
|
||||
|
||||
|
||||
class LoginManager(DockerBaseClass):
    """Perform the docker_login module's work: log in to or out of a registry
    and keep the local credential store / config file in sync.

    The constructor only captures parameters; call run() to execute.
    """

    def __init__(self, client, results):

        super(LoginManager, self).__init__()

        self.client = client
        # Shared results dict; mutated in place and emitted by main().
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.registry_url = parameters.get('registry_url')
        self.username = parameters.get('username')
        self.password = parameters.get('password')
        self.email = parameters.get('email')
        self.reauthorize = parameters.get('reauthorize')
        self.config_path = parameters.get('config_path')
        self.state = parameters.get('state')

    def run(self):
        '''
        Do the actual work of this task here. This allows instantiation for partial
        testing.
        '''

        if self.state == 'present':
            self.login()
        else:
            self.logout()

    def fail(self, msg):
        # Delegate failure reporting to the Docker client wrapper.
        self.client.fail(msg)

    def login(self):
        '''
        Log into the registry with provided username/password. On success update the config
        file with the new authorization.

        :return: None
        '''

        # The deprecated `email` option is only sanity-checked, never required.
        if self.email and not re.match(EMAIL_REGEX, self.email):
            self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
                      "/%s/" % (EMAIL_REGEX))

        self.results['actions'].append("Logged into %s" % (self.registry_url))
        self.log("Log into %s with username %s" % (self.registry_url, self.username))
        try:
            response = self.client.login(
                self.username,
                password=self.password,
                email=self.email,
                registry=self.registry_url,
                reauth=self.reauthorize,
                dockercfg_path=self.config_path
            )
        except Exception as exc:
            self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))

        # If user is already logged in, then response contains password for user
        if 'password' in response:
            # This returns correct password if user is logged in and wrong password is given.
            # So if it returns another password as we passed, and the user didn't request to
            # reauthorize, still do it.
            if not self.reauthorize and response['password'] != self.password:
                try:
                    # Retry with reauth=True to force the daemon to validate
                    # the credentials we were actually given.
                    response = self.client.login(
                        self.username,
                        password=self.password,
                        email=self.email,
                        registry=self.registry_url,
                        reauth=True,
                        dockercfg_path=self.config_path
                    )
                except Exception as exc:
                    self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
            # Never leak the password into the module's return value.
            response.pop('password', None)
        self.results['login_result'] = response

        self.update_credentials()

    def logout(self):
        '''
        Log out of the registry. On success update the config file.

        :return: None
        '''

        # Get the configuration store.
        store = self.get_credential_store_instance(self.registry_url, self.config_path)

        try:
            # Result is unused; the call only probes whether credentials exist.
            current = store.get(self.registry_url)
        except CredentialsNotFound:
            # get raises an exception on not found.
            self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
            self.results['changed'] = False
            return

        # In check mode report the change without touching the store.
        if not self.check_mode:
            store.erase(self.registry_url)
        self.results['changed'] = True

    def update_credentials(self):
        '''
        If the authorization is not stored attempt to store authorization values via
        the appropriate credential helper or to the config file.

        :return: None
        '''

        # Check to see if credentials already exist.
        store = self.get_credential_store_instance(self.registry_url, self.config_path)

        try:
            current = store.get(self.registry_url)
        except CredentialsNotFound:
            # get raises an exception on not found; treat as empty credentials
            # so the comparison below always triggers a store.
            current = dict(
                Username='',
                Secret=''
            )

        # Write only when something changed, or when the user forced it.
        if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
            if not self.check_mode:
                store.store(self.registry_url, self.username, self.password)
            self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
            self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
                store.program, self.registry_url))
            self.results['changed'] = True

    def get_credential_store_instance(self, registry, dockercfg_path):
        '''
        Return an instance of docker.credentials.Store used by the given registry.

        :return: A Store or None
        :rtype: Union[docker.credentials.Store, NoneType]
        '''

        # Older versions of docker-py don't have this feature.
        try:
            credstore_env = self.client.credstore_env
        except AttributeError:
            credstore_env = None

        config = auth.load_config(config_path=dockercfg_path)

        # Resolve the helper name; newer SDKs expose a lookup, older ones
        # require reading 'credsStore' from the config directly.
        if hasattr(auth, 'get_credential_store'):
            store_name = auth.get_credential_store(config, registry)
        elif 'credsStore' in config:
            store_name = config['credsStore']
        else:
            store_name = None

        # Make sure that there is a credential helper before trying to instantiate a
        # Store object.
        if store_name:
            self.log("Found credential store %s" % store_name)
            return Store(store_name, environment=credstore_env)

        # No helper configured: fall back to writing the config file directly.
        return DockerFileStore(dockercfg_path)
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_login module.

    Builds the Docker client from the module arguments, delegates the
    login/logout work to LoginManager, and exits with the results.
    """

    argument_spec = dict(
        registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
        username=dict(type='str'),
        password=dict(type='str', no_log=True),
        email=dict(type='str', removed_in_version='2.14'),
        reauthorize=dict(type='bool', default=False, aliases=['reauth']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
    )

    # Credentials are only mandatory when logging in; logout needs none.
    required_if = [
        ('state', 'present', ['username', 'password']),
    ]

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
        min_docker_api_version='1.20',
    )

    try:
        results = dict(
            changed=False,
            actions=[],
            login_result={}
        )

        manager = LoginManager(client, results)
        manager.run()

        # 'actions' is internal bookkeeping only; strip it from the output.
        if 'actions' in results:
            del results['actions']
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
722
plugins/modules/docker_network.py
Normal file
722
plugins/modules/docker_network.py
Normal file
@ -0,0 +1,722 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: docker_network
|
||||
short_description: Manage Docker networks
|
||||
description:
|
||||
- Create/remove Docker networks and connect containers to them.
|
||||
- Performs largely the same function as the "docker network" CLI subcommand.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the network to operate on.
|
||||
type: str
|
||||
required: yes
|
||||
aliases:
|
||||
- network_name
|
||||
|
||||
connected:
|
||||
description:
|
||||
- List of container names or container IDs to connect to a network.
|
||||
- Please note that the module only makes sure that these containers are connected to the network,
|
||||
but does not care about connection options. If you rely on specific IP addresses etc., use the
|
||||
M(docker_container) module to ensure your containers are correctly connected to this network.
|
||||
type: list
|
||||
elements: str
|
||||
aliases:
|
||||
- containers
|
||||
|
||||
driver:
|
||||
description:
|
||||
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
|
||||
type: str
|
||||
default: bridge
|
||||
|
||||
driver_options:
|
||||
description:
|
||||
- Dictionary of network settings. Consult docker docs for valid options and values.
|
||||
type: dict
|
||||
|
||||
force:
|
||||
description:
|
||||
- With state C(absent) forces disconnecting all containers from the
|
||||
network prior to deleting the network. With state C(present) will
|
||||
disconnect all containers, delete the network and re-create the
|
||||
network.
|
||||
- This option is required if you have changed the IPAM or driver options
|
||||
and want an existing network to be updated to use the new options.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
appends:
|
||||
description:
|
||||
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
|
||||
- Use I(appends) to leave existing containers connected.
|
||||
type: bool
|
||||
default: no
|
||||
aliases:
|
||||
- incremental
|
||||
|
||||
enable_ipv6:
|
||||
description:
|
||||
- Enable IPv6 networking.
|
||||
type: bool
|
||||
|
||||
ipam_driver:
|
||||
description:
|
||||
- Specify an IPAM driver.
|
||||
type: str
|
||||
|
||||
ipam_driver_options:
|
||||
description:
|
||||
- Dictionary of IPAM driver options.
|
||||
type: dict
|
||||
|
||||
ipam_options:
|
||||
description:
|
||||
- Dictionary of IPAM options.
|
||||
- Deprecated in 2.8, will be removed in 2.12. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
|
||||
options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
|
||||
the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
|
||||
parameter.
|
||||
type: dict
|
||||
suboptions:
|
||||
subnet:
|
||||
description:
|
||||
- IP subset in CIDR notation.
|
||||
type: str
|
||||
iprange:
|
||||
description:
|
||||
- IP address range in CIDR notation.
|
||||
type: str
|
||||
gateway:
|
||||
description:
|
||||
- IP gateway address.
|
||||
type: str
|
||||
aux_addresses:
|
||||
description:
|
||||
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
|
||||
type: dict
|
||||
|
||||
ipam_config:
|
||||
description:
|
||||
- List of IPAM config blocks. Consult
|
||||
L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
|
||||
Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
subnet:
|
||||
description:
|
||||
- IP subset in CIDR notation.
|
||||
type: str
|
||||
iprange:
|
||||
description:
|
||||
- IP address range in CIDR notation.
|
||||
type: str
|
||||
gateway:
|
||||
description:
|
||||
- IP gateway address.
|
||||
type: str
|
||||
aux_addresses:
|
||||
description:
|
||||
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
|
||||
type: dict
|
||||
|
||||
state:
|
||||
description:
|
||||
- C(absent) deletes the network. If a network has connected containers, it
|
||||
cannot be deleted. Use the I(force) option to disconnect all containers
|
||||
and delete the network.
|
||||
- C(present) creates the network, if it does not already exist with the
|
||||
specified parameters, and connects the list of containers provided via
|
||||
the connected parameter. Containers not on the list will be disconnected.
|
||||
An empty list will leave no containers connected to the network. Use the
|
||||
I(appends) option to leave existing containers connected. Use the I(force)
|
||||
options to force re-creation of the network.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
internal:
|
||||
description:
|
||||
- Restrict external access to the network.
|
||||
type: bool
|
||||
|
||||
labels:
|
||||
description:
|
||||
- Dictionary of labels.
|
||||
type: dict
|
||||
|
||||
scope:
|
||||
description:
|
||||
- Specify the network's scope.
|
||||
type: str
|
||||
choices:
|
||||
- local
|
||||
- global
|
||||
- swarm
|
||||
|
||||
attachable:
|
||||
description:
|
||||
- If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
|
||||
type: bool
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
notes:
|
||||
- When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
|
||||
It does not try to reconnect containers, except the ones listed in (I(connected), and even for these, it does not consider specific
|
||||
connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
|
||||
network, loop the M(docker_container) module to loop over your containers to make sure they are connected properly.
|
||||
- The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
|
||||
network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
|
||||
fail as well.
|
||||
|
||||
author:
|
||||
- "Ben Keith (@keitwb)"
|
||||
- "Chris Houseknecht (@chouseknecht)"
|
||||
- "Dave Bendit (@DBendit)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "The docker server >= 1.10.0"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a network
|
||||
docker_network:
|
||||
name: network_one
|
||||
|
||||
- name: Remove all but selected list of containers
|
||||
docker_network:
|
||||
name: network_one
|
||||
connected:
|
||||
- container_a
|
||||
- container_b
|
||||
- container_c
|
||||
|
||||
- name: Remove a single container
|
||||
docker_network:
|
||||
name: network_one
|
||||
connected: "{{ fulllist|difference(['container_a']) }}"
|
||||
|
||||
- name: Add a container to a network, leaving existing containers connected
|
||||
docker_network:
|
||||
name: network_one
|
||||
connected:
|
||||
- container_a
|
||||
appends: yes
|
||||
|
||||
- name: Create a network with driver options
|
||||
docker_network:
|
||||
name: network_two
|
||||
driver_options:
|
||||
com.docker.network.bridge.name: net2
|
||||
|
||||
- name: Create a network with custom IPAM config
|
||||
docker_network:
|
||||
name: network_three
|
||||
ipam_config:
|
||||
- subnet: 172.3.27.0/24
|
||||
gateway: 172.3.27.2
|
||||
iprange: 172.3.27.0/26
|
||||
aux_addresses:
|
||||
host1: 172.3.27.3
|
||||
host2: 172.3.27.4
|
||||
|
||||
- name: Create a network with labels
|
||||
docker_network:
|
||||
name: network_four
|
||||
labels:
|
||||
key1: value1
|
||||
key2: value2
|
||||
|
||||
- name: Create a network with IPv6 IPAM config
|
||||
docker_network:
|
||||
name: network_ipv6_one
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: fdd1:ac8c:0557:7ce1::/64
|
||||
|
||||
- name: Create a network with IPv6 and custom IPv4 IPAM config
|
||||
docker_network:
|
||||
name: network_ipv6_two
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: 172.4.27.0/24
|
||||
- subnet: fdd1:ac8c:0557:7ce2::/64
|
||||
|
||||
- name: Delete a network, disconnecting all containers
|
||||
docker_network:
|
||||
name: network_one
|
||||
state: absent
|
||||
force: yes
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
network:
|
||||
description:
|
||||
- Network inspection results for the affected network.
|
||||
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
|
||||
are also accessible directly as C(docker_network). Note that the returned fact will be removed in Ansible 2.12.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
'''
|
||||
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
docker_version,
|
||||
DifferenceTracker,
|
||||
clean_dict_booleans_for_docker_api,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
try:
|
||||
from docker import utils
|
||||
from docker.errors import DockerException
|
||||
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
|
||||
from docker.types import IPAMPool, IPAMConfig
|
||||
except Exception:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
    '''Plain holder for the docker_network module parameters.

    All known option names are initialized to None, then overwritten with
    whatever the user supplied via the module parameters.
    '''

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client

        # Declare every supported option up front so attribute access is
        # always safe, even for options the user did not provide.
        for option in ('name', 'connected', 'driver', 'driver_options',
                       'ipam_driver', 'ipam_driver_options', 'ipam_options',
                       'ipam_config', 'appends', 'force', 'internal', 'labels',
                       'debug', 'enable_ipv6', 'scope', 'attachable'):
            setattr(self, option, None)

        # Copy the actual parameter values over the defaults.
        for key, value in client.module.params.items():
            setattr(self, key, value)
|
||||
|
||||
|
||||
def container_names_in_network(network):
    '''Return the names of all containers attached to *network*.

    *network* is a network-inspect dict; its 'Containers' entry may be a
    mapping of container id -> details, or empty/None.
    '''
    containers = network['Containers']
    if not containers:
        return []
    return [entry['Name'] for entry in containers.values()]
|
||||
|
||||
|
||||
# NOTE(review): the IPv4 address part does not bound octets to 0-255 (e.g.
# "999.0.0.0/8" is accepted); kept as-is for backward compatibility.
CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
# Prefix length capped at 128. The previous alternative `1[0-2][0-9]`
# matched 100-129 and thus wrongly accepted prefixes like "::/129".
CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[01][0-9]|12[0-8])$')


def validate_cidr(cidr):
    """Validate CIDR. Return IP version of a CIDR string on success.

    :param cidr: Valid CIDR
    :type cidr: str
    :return: ``ipv4`` or ``ipv6``
    :rtype: str
    :raises ValueError: If ``cidr`` is not a valid CIDR
    """
    if CIDR_IPV4.match(cidr):
        return 'ipv4'
    if CIDR_IPV6.match(cidr):
        return 'ipv6'
    raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
|
||||
|
||||
|
||||
def normalize_ipam_config_key(key):
    """Normalizes IPAM config keys returned by Docker API to match Ansible keys.

    :param key: Docker API key
    :type key: str
    :return Ansible module key
    :rtype str
    """
    # Only one key does not follow the simple lowercase rule.
    if key == 'AuxiliaryAddresses':
        return 'aux_addresses'
    return key.lower()
|
||||
|
||||
|
||||
def dicts_are_essentially_equal(a, b):
    """Make sure that a is a subset of b, where None entries of a are ignored."""
    return all(
        b.get(key) == value
        for key, value in a.items()
        if value is not None
    )
|
||||
|
||||
|
||||
class DockerNetworkManager(object):
    """Drives the docker_network module: reconciles the requested network
    state (present/absent, driver, IPAM, labels, connected containers)
    against the existing docker network and records results/diff."""

    def __init__(self, client):
        """Run the whole reconciliation immediately on construction.

        :param client: AnsibleDockerClient wrapping module params and docker API
        """
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result = dict()

        self.existing_network = self.get_existing_network()

        # If no containers were requested, treat the currently connected
        # containers as the desired set (so they are not disconnected).
        if not self.parameters.connected and self.existing_network:
            self.parameters.connected = container_names_in_network(self.existing_network)

        # Legacy ipam_options (deprecated) is folded into ipam_config as a
        # single-entry list.
        if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
                self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
            self.parameters.ipam_config = [self.parameters.ipam_options]

        # Fail early on malformed subnets before touching the docker daemon.
        if self.parameters.ipam_config:
            try:
                for ipam_config in self.parameters.ipam_config:
                    validate_cidr(ipam_config['subnet'])
            except ValueError as e:
                self.client.fail(str(e))

        # Docker API expects driver option values as strings; convert booleans.
        if self.parameters.driver_options:
            self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)

        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()

        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff_result

    def get_existing_network(self):
        """Return the inspect dict of the named network, or None if absent."""
        return self.client.get_network(name=self.parameters.name)

    def has_different_config(self, net):
        '''
        Evaluates an existing network and returns a tuple containing a boolean
        indicating if the configuration is different and a list of differences.

        :param net: the inspection output for an existing network
        :return: (bool, list)
        '''
        differences = DifferenceTracker()
        if self.parameters.driver and self.parameters.driver != net['Driver']:
            differences.add('driver',
                            parameter=self.parameters.driver,
                            active=net['Driver'])
        if self.parameters.driver_options:
            if not net.get('Options'):
                differences.add('driver_options',
                                parameter=self.parameters.driver_options,
                                active=net.get('Options'))
            else:
                # Only options the user specified are compared; extra options
                # on the network are ignored.
                for key, value in self.parameters.driver_options.items():
                    if not (key in net['Options']) or value != net['Options'][key]:
                        differences.add('driver_options.%s' % key,
                                        parameter=value,
                                        active=net['Options'].get(key))

        if self.parameters.ipam_driver:
            if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
                differences.add('ipam_driver',
                                parameter=self.parameters.ipam_driver,
                                active=net.get('IPAM'))

        if self.parameters.ipam_driver_options is not None:
            ipam_driver_options = net['IPAM'].get('Options') or {}
            if ipam_driver_options != self.parameters.ipam_driver_options:
                differences.add('ipam_driver_options',
                                parameter=self.parameters.ipam_driver_options,
                                active=ipam_driver_options)

        if self.parameters.ipam_config is not None and self.parameters.ipam_config:
            if not net.get('IPAM') or not net['IPAM']['Config']:
                differences.add('ipam_config',
                                parameter=self.parameters.ipam_config,
                                active=net.get('IPAM', {}).get('Config'))
            else:
                # Put network's IPAM config into the same format as module's IPAM config
                net_ipam_configs = []
                for net_ipam_config in net['IPAM']['Config']:
                    config = dict()
                    for k, v in net_ipam_config.items():
                        config[normalize_ipam_config_key(k)] = v
                    net_ipam_configs.append(config)
                # Compare lists of dicts as sets of dicts
                for idx, ipam_config in enumerate(self.parameters.ipam_config):
                    # Find the matching network-side pool (None entries of the
                    # requested config are ignored in the match).
                    net_config = dict()
                    for net_ipam_config in net_ipam_configs:
                        if dicts_are_essentially_equal(ipam_config, net_ipam_config):
                            net_config = net_ipam_config
                            break
                    for key, value in ipam_config.items():
                        if value is None:
                            # due to recursive argument_spec, all keys are always present
                            # (but have default value None if not specified)
                            continue
                        if value != net_config.get(key):
                            differences.add('ipam_config[%s].%s' % (idx, key),
                                            parameter=value,
                                            active=net_config.get(key))

        if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
            differences.add('enable_ipv6',
                            parameter=self.parameters.enable_ipv6,
                            active=net.get('EnableIPv6', False))

        if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
            differences.add('internal',
                            parameter=self.parameters.internal,
                            active=net.get('Internal'))

        if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
            differences.add('scope',
                            parameter=self.parameters.scope,
                            active=net.get('Scope'))

        if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
            differences.add('attachable',
                            parameter=self.parameters.attachable,
                            active=net.get('Attachable'))
        if self.parameters.labels:
            if not net.get('Labels'):
                differences.add('labels',
                                parameter=self.parameters.labels,
                                active=net.get('Labels'))
            else:
                # As with driver options, extra labels on the network are ignored.
                for key, value in self.parameters.labels.items():
                    if not (key in net['Labels']) or value != net['Labels'][key]:
                        differences.add('labels.%s' % key,
                                        parameter=value,
                                        active=net['Labels'].get(key))

        return not differences.empty, differences

    def create_network(self):
        """Create the network if it does not exist yet (no-op otherwise).

        Honors check mode; records an action and sets changed.
        """
        if not self.existing_network:
            params = dict(
                driver=self.parameters.driver,
                options=self.parameters.driver_options,
            )

            ipam_pools = []
            if self.parameters.ipam_config:
                for ipam_pool in self.parameters.ipam_config:
                    # docker-py >= 2.0.0 renamed utils.create_ipam_pool to IPAMPool.
                    if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
                        ipam_pools.append(IPAMPool(**ipam_pool))
                    else:
                        ipam_pools.append(utils.create_ipam_pool(**ipam_pool))

            if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
                # Only add ipam parameter if a driver was specified or if IPAM parameters
                # were specified. Leaving this parameter away can significantly speed up
                # creation; on my machine creation with this option needs ~15 seconds,
                # and without just a few seconds.
                if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
                    params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
                                                pool_configs=ipam_pools,
                                                options=self.parameters.ipam_driver_options)
                else:
                    params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
                                                              pool_configs=ipam_pools)

            # Optional flags are only passed when the user set them, so the
            # daemon's defaults apply otherwise.
            if self.parameters.enable_ipv6 is not None:
                params['enable_ipv6'] = self.parameters.enable_ipv6
            if self.parameters.internal is not None:
                params['internal'] = self.parameters.internal
            if self.parameters.scope is not None:
                params['scope'] = self.parameters.scope
            if self.parameters.attachable is not None:
                params['attachable'] = self.parameters.attachable
            if self.parameters.labels:
                params['labels'] = self.parameters.labels

            if not self.check_mode:
                resp = self.client.create_network(self.parameters.name, **params)
                self.client.report_warnings(resp, ['Warning'])
                # Re-inspect so later steps see the freshly created network.
                self.existing_network = self.client.get_network(network_id=resp['Id'])
            self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
            self.results['changed'] = True

    def remove_network(self):
        """Disconnect all containers and delete the network if it exists."""
        if self.existing_network:
            self.disconnect_all_containers()
            if not self.check_mode:
                self.client.remove_network(self.parameters.name)
            self.results['actions'].append("Removed network %s" % (self.parameters.name,))
            self.results['changed'] = True

    def is_container_connected(self, container_name):
        """Return True if the named container is attached to the existing network."""
        if not self.existing_network:
            return False
        return container_name in container_names_in_network(self.existing_network)

    def connect_containers(self):
        """Connect every requested container that is not yet connected."""
        for name in self.parameters.connected:
            if not self.is_container_connected(name):
                if not self.check_mode:
                    self.client.connect_container_to_network(name, self.parameters.name)
                self.results['actions'].append("Connected container %s" % (name,))
                self.results['changed'] = True
                self.diff_tracker.add('connected.{0}'.format(name),
                                      parameter=True,
                                      active=False)

    def disconnect_missing(self):
        """Disconnect containers attached to the network but not requested."""
        if not self.existing_network:
            return
        containers = self.existing_network['Containers']
        if not containers:
            return
        for c in containers.values():
            name = c['Name']
            if name not in self.parameters.connected:
                self.disconnect_container(name)

    def disconnect_all_containers(self):
        """Disconnect every container currently attached to the network."""
        # Re-inspect rather than using self.existing_network, so the container
        # list is current at removal time.
        containers = self.client.get_network(name=self.parameters.name)['Containers']
        if not containers:
            return
        for cont in containers.values():
            self.disconnect_container(cont['Name'])

    def disconnect_container(self, container_name):
        """Disconnect a single container, recording action, change and diff."""
        if not self.check_mode:
            self.client.disconnect_container_from_network(container_name, self.parameters.name)
        self.results['actions'].append("Disconnected container %s" % (container_name,))
        self.results['changed'] = True
        self.diff_tracker.add('connected.{0}'.format(container_name),
                              parameter=False,
                              active=True)

    def present(self):
        """Ensure the network exists with the requested configuration.

        A network with differing configuration (or force=true) is removed and
        recreated; container connections are then reconciled.
        """
        different = False
        differences = DifferenceTracker()
        if self.existing_network:
            different, differences = self.has_different_config(self.existing_network)

        self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
        if self.parameters.force or different:
            self.remove_network()
            self.existing_network = None

        self.create_network()
        self.connect_containers()
        # appends=true means: only add connections, never remove existing ones.
        if not self.parameters.appends:
            self.disconnect_missing()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_result['differences'] = differences.get_legacy_docker_diffs()
            self.diff_tracker.merge(differences)

        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')

        network_facts = self.get_existing_network()
        self.results['ansible_facts'] = {u'docker_network': network_facts}
        self.results['network'] = network_facts

    def absent(self):
        """Ensure the network does not exist."""
        self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
        self.remove_network()
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_network module: parse arguments, build the
    docker client, run the network manager and exit with its results."""
    argument_spec = dict(
        name=dict(type='str', required=True, aliases=['network_name']),
        connected=dict(type='list', default=[], elements='str', aliases=['containers']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        driver=dict(type='str', default='bridge'),
        driver_options=dict(type='dict', default={}),
        force=dict(type='bool', default=False),
        appends=dict(type='bool', default=False, aliases=['incremental']),
        ipam_driver=dict(type='str'),
        ipam_driver_options=dict(type='dict'),
        # Deprecated in favor of ipam_config (see removed_in_version below).
        ipam_options=dict(type='dict', default={}, options=dict(
            subnet=dict(type='str'),
            iprange=dict(type='str'),
            gateway=dict(type='str'),
            aux_addresses=dict(type='dict'),
        ), removed_in_version='2.12'),
        ipam_config=dict(type='list', elements='dict', options=dict(
            subnet=dict(type='str'),
            iprange=dict(type='str'),
            gateway=dict(type='str'),
            aux_addresses=dict(type='dict'),
        )),
        enable_ipv6=dict(type='bool'),
        internal=dict(type='bool'),
        labels=dict(type='dict', default={}),
        debug=dict(type='bool', default=False),
        scope=dict(type='str', choices=['local', 'global', 'swarm']),
        attachable=dict(type='bool'),
    )

    mutually_exclusive = [
        ('ipam_config', 'ipam_options')
    ]

    # Options that require newer docker-py / Docker API versions than the
    # module's baseline; the client fails if they are used but unsupported.
    option_minimal_versions = dict(
        scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
        labels=dict(docker_api_version='1.23'),
        ipam_driver_options=dict(docker_py_version='2.0.0'),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        mutually_exclusive=mutually_exclusive,
        supports_check_mode=True,
        min_docker_version='1.10.0',
        min_docker_api_version='1.22',
        # "The docker server >= 1.10.0"
        option_minimal_versions=option_minimal_versions,
    )

    try:
        # The manager runs the full reconciliation in its constructor.
        cm = DockerNetworkManager(client)
        client.module.exit_json(**cm.results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
146
plugins/modules/docker_network_info.py
Normal file
146
plugins/modules/docker_network_info.py
Normal file
@ -0,0 +1,146 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_network_info
|
||||
|
||||
short_description: Retrieves facts about docker network
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker network.
|
||||
- Essentially returns the output of C(docker network inspect <name>), similar to what M(docker_network)
|
||||
returns for a non-absent network.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the network to inspect.
|
||||
- When identifying an existing network name may be a name or a long or short network ID.
|
||||
type: str
|
||||
required: yes
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- "Dave Bendit (@DBendit)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.21"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get infos on network
|
||||
docker_network_info:
|
||||
name: mydata
|
||||
register: result
|
||||
|
||||
- name: Does network exist?
|
||||
debug:
|
||||
msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
|
||||
|
||||
- name: Print information about network
|
||||
debug:
|
||||
var: result.network
|
||||
when: result.exists
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the network exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
network:
|
||||
description:
|
||||
- Facts representing the current state of the network. Matches the docker inspection output.
|
||||
- Will be C(none) if network does not exist.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: '{
|
||||
"Attachable": false,
|
||||
"ConfigFrom": {
|
||||
"Network": ""
|
||||
},
|
||||
"ConfigOnly": false,
|
||||
"Containers": {},
|
||||
"Created": "2018-12-07T01:47:51.250835114-06:00",
|
||||
"Driver": "bridge",
|
||||
"EnableIPv6": false,
|
||||
"IPAM": {
|
||||
"Config": [
|
||||
{
|
||||
"Gateway": "192.168.96.1",
|
||||
"Subnet": "192.168.96.0/20"
|
||||
}
|
||||
],
|
||||
"Driver": "default",
|
||||
"Options": null
|
||||
},
|
||||
"Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
|
||||
"Ingress": false,
|
||||
"Internal": false,
|
||||
"Labels": {},
|
||||
"Name": "ansible-test-f2700bba",
|
||||
"Options": {},
|
||||
"Scope": "local"
|
||||
}'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_network_info module.

    Inspects a single Docker network by name (or ID) and exits with
    ``exists`` (bool) and ``network`` (inspect dict or None).
    """
    argument_spec = dict(
        name=dict(type='str', required=True),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_api_version='1.21',
    )

    try:
        # get_network returns the inspect dict, or None if no such network.
        network = client.get_network(client.module.params['name'])

        client.module.exit_json(
            changed=False,
            exists=bool(network),
            network=network,
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
298
plugins/modules/docker_node.py
Normal file
298
plugins/modules/docker_node.py
Normal file
@ -0,0 +1,298 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_node
|
||||
short_description: Manage Docker Swarm node
|
||||
description:
|
||||
- Manages the Docker nodes via Swarm Manager.
|
||||
- This module allows changing the node's role and availability, and modifying, adding or removing node labels.
|
||||
options:
|
||||
hostname:
|
||||
description:
|
||||
- The hostname or ID of node as registered in Swarm.
|
||||
- If more than one node is registered using the same hostname the ID must be used,
|
||||
otherwise module will fail.
|
||||
type: str
|
||||
required: yes
|
||||
labels:
|
||||
description:
|
||||
- User-defined key/value metadata that will be assigned as node attribute.
|
||||
- Label operations in this module apply to the docker swarm node specified by I(hostname).
|
||||
Use M(docker_swarm) module to add/modify/remove swarm cluster labels.
|
||||
- The actual state of labels assigned to the node when module completes its work depends on
|
||||
I(labels_state) and I(labels_to_remove) parameters values. See description below.
|
||||
type: dict
|
||||
labels_state:
|
||||
description:
|
||||
- It defines the operation on the labels assigned to node and labels specified in I(labels) option.
|
||||
- Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
|
||||
If no labels are assigned then it will add listed labels. For labels that are already assigned
|
||||
to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
|
||||
If I(labels) is empty then no changes will be made.
|
||||
- Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
|
||||
all labels assigned to the node will be removed.
|
||||
type: str
|
||||
default: 'merge'
|
||||
choices:
|
||||
- merge
|
||||
- replace
|
||||
labels_to_remove:
|
||||
description:
|
||||
- List of labels that will be removed from the node configuration. The list has to contain only label
|
||||
names, not their values.
|
||||
- If the label provided on the list is not assigned to the node, the entry is ignored.
|
||||
- If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains
|
||||
assigned to the node.
|
||||
- If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to
|
||||
node are removed and I(labels_to_remove) is ignored.
|
||||
type: list
|
||||
elements: str
|
||||
availability:
|
||||
description: Node availability to assign. If not provided then node availability remains unchanged.
|
||||
choices:
|
||||
- active
|
||||
- pause
|
||||
- drain
|
||||
type: str
|
||||
role:
|
||||
description: Node role to assign. If not provided then node role remains unchanged.
|
||||
choices:
|
||||
- manager
|
||||
- worker
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
|
||||
- Docker API >= 1.25
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
- Thierry Bouvet (@tbouvet)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set node role
|
||||
docker_node:
|
||||
hostname: mynode
|
||||
role: manager
|
||||
|
||||
- name: Set node availability
|
||||
docker_node:
|
||||
hostname: mynode
|
||||
availability: drain
|
||||
|
||||
- name: Replace node labels with new labels
|
||||
docker_node:
|
||||
hostname: mynode
|
||||
labels:
|
||||
key: value
|
||||
labels_state: replace
|
||||
|
||||
- name: Merge node labels and new labels
|
||||
docker_node:
|
||||
hostname: mynode
|
||||
labels:
|
||||
key: value
|
||||
|
||||
- name: Remove all labels assigned to node
|
||||
docker_node:
|
||||
hostname: mynode
|
||||
labels_state: replace
|
||||
|
||||
- name: Remove selected labels from the node
|
||||
docker_node:
|
||||
hostname: mynode
|
||||
labels_to_remove:
|
||||
- key1
|
||||
- key2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
node:
|
||||
description: Information about node after 'update' operation
|
||||
returned: success
|
||||
type: dict
|
||||
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
    """Holds the docker_node module parameters as attributes.

    All attributes are initialized to None and then overwritten with the
    values from the module's parameters.
    """

    def __init__(self, client):
        super(TaskParameters, self).__init__()

        # Spec
        self.name = None
        self.labels = None
        self.labels_state = None
        self.labels_to_remove = None

        # Node
        self.availability = None
        self.role = None

        # Copy every module parameter onto this object, overriding the
        # defaults above.
        for param_name, param_value in client.module.params.items():
            setattr(self, param_name, param_value)
|
||||
|
||||
|
||||
class SwarmNodeManager(DockerBaseClass):
    """Updates a single swarm node's role, availability and labels via the
    swarm manager the module talks to. Results are written into the shared
    ``results`` dict passed to the constructor."""

    def __init__(self, client, results):

        super(SwarmNodeManager, self).__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        # The node update API is only available on a swarm manager.
        self.client.fail_task_if_not_swarm_manager()

        self.parameters = TaskParameters(client)

        self.node_update()

    def node_update(self):
        """Compute the desired node spec and apply it if it differs.

        Sets ``results['node']`` to the (possibly updated) node inspect data
        and ``results['changed']`` accordingly. Honors check mode.
        """
        if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)):
            self.client.fail("This node is not part of a swarm.")
            return

        if self.client.check_if_swarm_node_is_down():
            self.client.fail("Can not update the node. The node is down.")

        try:
            node_info = self.client.inspect_node(node_id=self.parameters.hostname)
        except APIError as exc:
            self.client.fail("Failed to get node information for %s" % to_native(exc))

        changed = False
        # Desired spec, seeded with the requested values (None where a value
        # was not provided; those are filled in from the current spec below).
        node_spec = dict(
            Availability=self.parameters.availability,
            Role=self.parameters.role,
            Labels=self.parameters.labels,
        )

        if self.parameters.role is None:
            # Keep the current role.
            node_spec['Role'] = node_info['Spec']['Role']
        else:
            if not node_info['Spec']['Role'] == self.parameters.role:
                node_spec['Role'] = self.parameters.role
                changed = True

        if self.parameters.availability is None:
            # Keep the current availability.
            node_spec['Availability'] = node_info['Spec']['Availability']
        else:
            if not node_info['Spec']['Availability'] == self.parameters.availability:
                # NOTE(review): this writes to node_info rather than node_spec.
                # node_spec['Availability'] already holds the requested value
                # from its construction above, so the update still carries it,
                # but the assignment target looks unintended — confirm.
                node_info['Spec']['Availability'] = self.parameters.availability
                changed = True

        if self.parameters.labels_state == 'replace':
            if self.parameters.labels is None:
                # replace with no labels given: drop all labels.
                node_spec['Labels'] = {}
                if node_info['Spec']['Labels']:
                    changed = True
            else:
                if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
                    node_spec['Labels'] = self.parameters.labels
                    changed = True
        elif self.parameters.labels_state == 'merge':
            # Start from the currently assigned labels and overlay the
            # requested ones.
            node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
            if self.parameters.labels is not None:
                for key, value in self.parameters.labels.items():
                    if node_spec['Labels'].get(key) != value:
                        node_spec['Labels'][key] = value
                        changed = True

            if self.parameters.labels_to_remove is not None:
                for key in self.parameters.labels_to_remove:
                    if self.parameters.labels is not None:
                        if not self.parameters.labels.get(key):
                            if node_spec['Labels'].get(key):
                                node_spec['Labels'].pop(key)
                                changed = True
                        else:
                            # A label in both 'labels' and 'labels_to_remove':
                            # the assigned value wins; warn the user.
                            self.client.module.warn(
                                "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
                                "Keeping the assigned label value."
                                % to_native(key))
                    else:
                        if node_spec['Labels'].get(key):
                            node_spec['Labels'].pop(key)
                            changed = True

        if changed is True:
            if not self.check_mode:
                try:
                    # The version index guards against concurrent updates.
                    self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
                                            node_spec=node_spec)
                except APIError as exc:
                    self.client.fail("Failed to update node : %s" % to_native(exc))
            self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
            self.results['changed'] = changed
        else:
            self.results['node'] = node_info
            self.results['changed'] = changed
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_node module: parse arguments, build the
    swarm client, run the node manager and exit with its results."""
    argument_spec = dict(
        hostname=dict(type='str', required=True),
        labels=dict(type='dict'),
        labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
        labels_to_remove=dict(type='list', elements='str'),
        availability=dict(type='str', choices=['active', 'pause', 'drain']),
        role=dict(type='str', choices=['worker', 'manager']),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='2.4.0',
        min_docker_api_version='1.25',
    )

    try:
        results = dict(
            changed=False,
        )

        # The manager mutates 'results' in place during construction.
        SwarmNodeManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
160
plugins/modules/docker_node_info.py
Normal file
160
plugins/modules/docker_node_info.py
Normal file
@ -0,0 +1,160 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_node_info
|
||||
|
||||
short_description: Retrieves facts about docker swarm node from Swarm Manager
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker node.
|
||||
- Essentially returns the output of C(docker node inspect <name>).
|
||||
- Must be executed on a host running as Swarm Manager, otherwise the module will fail.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the node to inspect.
|
||||
- The list of nodes names to inspect.
|
||||
- If empty then return information about all nodes in the Swarm cluster.
|
||||
- When identifying the node use either the hostname of the node (as registered in Swarm) or node ID.
|
||||
- If I(self) is C(true) then this parameter is ignored.
|
||||
type: list
|
||||
elements: str
|
||||
self:
|
||||
description:
|
||||
- If C(true), queries the node (i.e. the docker daemon) the module communicates with.
|
||||
- If C(true) then I(name) is ignored.
|
||||
- If C(false) then query depends on I(name) presence and value.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
|
||||
- "Docker API >= 1.24"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info on all nodes
|
||||
docker_node_info:
|
||||
register: result
|
||||
|
||||
- name: Get info on node
|
||||
docker_node_info:
|
||||
name: mynode
|
||||
register: result
|
||||
|
||||
- name: Get info on list of nodes
|
||||
docker_node_info:
|
||||
name:
|
||||
- mynode1
|
||||
- mynode2
|
||||
register: result
|
||||
|
||||
- name: Get info on host if it is Swarm Manager
|
||||
docker_node_info:
|
||||
self: true
|
||||
register: result
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
nodes:
|
||||
description:
|
||||
- Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
|
||||
- Can contain multiple entries if more than one node provided in I(name), or I(name) is not provided.
|
||||
- If I(name) contains a list of nodes, the output will provide information on all nodes registered
|
||||
at the swarm, including nodes that left the swarm but haven't been removed from the cluster on swarm
|
||||
managers and nodes that are unreachable.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
def get_node_facts(client):
    """Collect inspection data for the requested swarm node(s).

    Returns a list of node-inspect dicts. Which nodes are inspected depends
    on the module parameters: 'self' wins over 'name', and no 'name' at all
    means every node in the cluster.
    """
    params = client.module.params

    # self=true: inspect only the node the docker daemon we talk to runs on.
    if params['self'] is True:
        own_id = client.get_swarm_node_id()
        return [client.get_node_inspect(node_id=own_id)]

    # No names supplied: return inspect data for the whole cluster.
    if params['name'] is None:
        return client.get_all_nodes_inspect()

    requested = params['name']
    if not isinstance(requested, list):
        requested = [requested]

    # Inspect each named node; silently skip names that do not resolve
    # (skip_missing=True) so unknown nodes simply do not appear in results.
    collected = []
    for node_name in requested:
        info = client.get_node_inspect(node_id=node_name, skip_missing=True)
        if info:
            collected.append(info)
    return collected
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_node_info module."""
    client = AnsibleDockerSwarmClient(
        argument_spec=dict(
            name=dict(type='list', elements='str'),
            self=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        min_docker_version='2.4.0',
        min_docker_api_version='1.24',
    )

    # Node inspection is only available through a swarm manager; abort early
    # otherwise.
    client.fail_task_if_not_swarm_manager()

    try:
        node_facts = get_node_facts(client)
        # Pure information gathering: never reports a change.
        client.module.exit_json(
            changed=False,
            nodes=node_facts,
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
270
plugins/modules/docker_prune.py
Normal file
270
plugins/modules/docker_prune.py
Normal file
@ -0,0 +1,270 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_prune
|
||||
|
||||
short_description: Allows to prune various docker objects
|
||||
|
||||
description:
|
||||
- Allows to run C(docker container prune), C(docker image prune), C(docker network prune)
|
||||
and C(docker volume prune) via the Docker API.
|
||||
|
||||
|
||||
options:
|
||||
containers:
|
||||
description:
|
||||
- Whether to prune containers.
|
||||
type: bool
|
||||
default: no
|
||||
containers_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting containers to delete.
|
||||
- "For example, C(until: 24h)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
images:
|
||||
description:
|
||||
- Whether to prune images.
|
||||
type: bool
|
||||
default: no
|
||||
images_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting images to delete.
|
||||
- "For example, C(dangling: true)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
networks:
|
||||
description:
|
||||
- Whether to prune networks.
|
||||
type: bool
|
||||
default: no
|
||||
networks_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting networks to delete.
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
volumes:
|
||||
description:
|
||||
- Whether to prune volumes.
|
||||
type: bool
|
||||
default: no
|
||||
volumes_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting volumes to delete.
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
builder_cache:
|
||||
description:
|
||||
- Whether to prune the builder cache.
|
||||
- Requires version 3.3.0 of the Docker SDK for Python or newer.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
|
||||
author:
|
||||
- "Felix Fontein (@felixfontein)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
|
||||
- "Docker API >= 1.25"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Prune containers older than 24h
|
||||
docker_prune:
|
||||
containers: yes
|
||||
containers_filters:
|
||||
# only consider containers created more than 24 hours ago
|
||||
until: 24h
|
||||
|
||||
- name: Prune everything
|
||||
docker_prune:
|
||||
containers: yes
|
||||
images: yes
|
||||
networks: yes
|
||||
volumes: yes
|
||||
builder_cache: yes
|
||||
|
||||
- name: Prune everything (including non-dangling images)
|
||||
docker_prune:
|
||||
containers: yes
|
||||
images: yes
|
||||
images_filters:
|
||||
dangling: false
|
||||
networks: yes
|
||||
volumes: yes
|
||||
builder_cache: yes
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
# containers
|
||||
containers:
|
||||
description:
|
||||
- List of IDs of deleted containers.
|
||||
returned: I(containers) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
containers_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from container pruning in bytes.
|
||||
returned: I(containers) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
|
||||
# images
|
||||
images:
|
||||
description:
|
||||
- List of IDs of deleted images.
|
||||
returned: I(images) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
images_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from image pruning in bytes.
|
||||
returned: I(images) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
|
||||
# networks
|
||||
networks:
|
||||
description:
|
||||
- List of IDs of deleted networks.
|
||||
returned: I(networks) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
|
||||
# volumes
|
||||
volumes:
|
||||
description:
|
||||
- List of IDs of deleted volumes.
|
||||
returned: I(volumes) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
volumes_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from volumes pruning in bytes.
|
||||
returned: I(volumes) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
|
||||
# builder_cache
|
||||
builder_cache_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from builder cache pruning in bytes.
|
||||
returned: I(builder_cache) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
try:
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import docker_version, clean_dict_booleans_for_docker_api
|
||||
except Exception as dummy:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_prune module.

    Runs the prune operations selected via the boolean module options and
    reports the deleted object IDs and reclaimed space for each one.
    """
    argument_spec = dict(
        containers=dict(type='bool', default=False),
        containers_filters=dict(type='dict'),
        images=dict(type='bool', default=False),
        images_filters=dict(type='dict'),
        networks=dict(type='bool', default=False),
        networks_filters=dict(type='dict'),
        volumes=dict(type='bool', default=False),
        volumes_filters=dict(type='dict'),
        builder_cache=dict(type='bool', default=False),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        # supports_check_mode=True,
        min_docker_api_version='1.25',
        min_docker_version='2.1.0',
    )

    # Version checks: prune_builds() needs a newer SDK than the rest of the
    # module (see DOCUMENTATION for the builder_cache option).
    cache_min_version = '3.3.0'
    if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
        # Fixed: the message used to say "builds option" although the module
        # option triggering this check is named builder_cache.
        msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builder_cache option is %s. Use `pip install --upgrade docker` to upgrade."
        client.fail(msg % (docker_version, cache_min_version))

    try:
        result = dict()

        # Each prune kind is independent; filters are normalized because the
        # Docker API expects string values ('true'/'false') for booleans.
        if client.module.params['containers']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
            res = client.prune_containers(filters=filters)
            # The API returns null instead of an empty list when nothing was
            # deleted; normalize to [].
            result['containers'] = res.get('ContainersDeleted') or []
            result['containers_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['images']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
            res = client.prune_images(filters=filters)
            result['images'] = res.get('ImagesDeleted') or []
            result['images_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['networks']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
            res = client.prune_networks(filters=filters)
            # Network pruning reports no reclaimed space.
            result['networks'] = res.get('NetworksDeleted') or []

        if client.module.params['volumes']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
            res = client.prune_volumes(filters=filters)
            result['volumes'] = res.get('VolumesDeleted') or []
            result['volumes_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['builder_cache']:
            res = client.prune_builds()
            result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']

        client.module.exit_json(**result)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
304
plugins/modules/docker_secret.py
Normal file
304
plugins/modules/docker_secret.py
Normal file
@ -0,0 +1,304 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_secret
|
||||
|
||||
short_description: Manage docker secrets.
|
||||
|
||||
|
||||
description:
|
||||
- Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
|
||||
- Adds to the metadata of new secrets 'ansible_key', an encrypted hash representation of the data, which is then used
|
||||
in future runs to test if a secret has changed. If 'ansible_key is not present, then a secret will not be updated
|
||||
unless the I(force) option is set.
|
||||
- Updates to secrets are performed by removing the secret and creating it again.
|
||||
options:
|
||||
data:
|
||||
description:
|
||||
- The value of the secret. Required when state is C(present).
|
||||
type: str
|
||||
data_is_b64:
|
||||
description:
|
||||
- If set to C(true), the data is assumed to be Base64 encoded and will be
|
||||
decoded before being used.
|
||||
- To use binary I(data), it is better to keep it Base64 encoded and let it
|
||||
be decoded by this option.
|
||||
type: bool
|
||||
default: no
|
||||
labels:
|
||||
description:
|
||||
- "A map of key:value meta data, where both key and value are expected to be strings."
|
||||
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
|
||||
type: dict
|
||||
force:
|
||||
description:
|
||||
- Use with state C(present) to always remove and recreate an existing secret.
|
||||
- If C(true), an existing secret will be replaced, even if it has not changed.
|
||||
type: bool
|
||||
default: no
|
||||
name:
|
||||
description:
|
||||
- The name of the secret.
|
||||
type: str
|
||||
required: yes
|
||||
state:
|
||||
description:
|
||||
- Set to C(present), if the secret should exist, and C(absent), if it should not.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Create secret foo (from a file on the control machine)
|
||||
docker_secret:
|
||||
name: foo
|
||||
# If the file is JSON or binary, Ansible might modify it (because
|
||||
# it is first decoded and later re-encoded). Base64-encoding the
|
||||
# file directly after reading it prevents this to happen.
|
||||
data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
|
||||
data_is_b64: true
|
||||
state: present
|
||||
|
||||
- name: Change the secret data
|
||||
docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Add a new label
|
||||
docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Adding a new label will cause a remove/create of the secret
|
||||
two: '2'
|
||||
state: present
|
||||
|
||||
- name: No change
|
||||
docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Even though 'two' is missing, there is no change to the existing secret
|
||||
state: present
|
||||
|
||||
- name: Update an existing label
|
||||
docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: monkey # Changing a label will cause a remove/create of the secret
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Force the removal/creation of the secret
|
||||
docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
force: yes
|
||||
state: present
|
||||
|
||||
- name: Remove secret foo
|
||||
docker_secret:
|
||||
name: foo
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
secret_id:
|
||||
description:
|
||||
- The ID assigned by Docker to the secret object.
|
||||
returned: success and I(state) is C(present)
|
||||
type: str
|
||||
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
|
||||
'''
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
compare_generic,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils._text import to_native, to_bytes
|
||||
|
||||
|
||||
class SecretManager(DockerBaseClass):
    """State handler for the docker_secret module.

    Reads the module parameters from *client* and, when the instance is
    called, creates, replaces or removes the named Docker Swarm secret.
    Outcome is written into the *results* dict passed to __init__.
    """

    def __init__(self, client, results):
        # client: AnsibleDockerClient wrapper around the Docker SDK.
        # results: dict mutated in place; the caller passes it to exit_json().

        super(SecretManager, self).__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        parameters = self.client.module.params
        self.name = parameters.get('name')
        self.state = parameters.get('state')
        self.data = parameters.get('data')
        if self.data is not None:
            # Normalize the payload to bytes: either decode user-supplied
            # Base64 text, or encode the plain string.
            if parameters.get('data_is_b64'):
                self.data = base64.b64decode(self.data)
            else:
                self.data = to_bytes(self.data)
        self.labels = parameters.get('labels')
        self.force = parameters.get('force')
        # SHA-224 digest of the data; only computed in __call__ for
        # state=present (where I(data) is required by the module spec).
        self.data_key = None

    def __call__(self):
        # Dispatch on the requested state.
        if self.state == 'present':
            self.data_key = hashlib.sha224(self.data).hexdigest()
            self.present()
        elif self.state == 'absent':
            self.absent()

    def get_secret(self):
        ''' Find an existing secret. '''
        try:
            # The name filter may match by substring, so re-check for an
            # exact name match below.
            secrets = self.client.secrets(filters={'name': self.name})
        except APIError as exc:
            self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))

        for secret in secrets:
            if secret['Spec']['Name'] == self.name:
                return secret
        return None

    def create_secret(self):
        ''' Create a new secret '''
        # Returns the new secret's ID, or None in check mode (nothing is
        # actually created then).
        secret_id = None
        # We can't see the data after creation, so adding a label we can use for idempotency check
        labels = {
            'ansible_key': self.data_key
        }
        if self.labels:
            labels.update(self.labels)

        try:
            if not self.check_mode:
                secret_id = self.client.create_secret(self.name, self.data, labels=labels)
        except APIError as exc:
            self.client.fail("Error creating secret: %s" % to_native(exc))

        # Depending on the SDK version the result may be a dict instead of a
        # plain ID string; unwrap it either way.
        if isinstance(secret_id, dict):
            secret_id = secret_id['ID']

        return secret_id

    def present(self):
        ''' Handles state == 'present', creating or updating the secret '''
        secret = self.get_secret()
        if secret:
            self.results['secret_id'] = secret['ID']
            data_changed = False
            attrs = secret.get('Spec', {})
            # The 'ansible_key' label stores the digest written at creation
            # time; compare it to detect a changed payload. A secret created
            # without this label is never considered data-changed (see the
            # module DOCUMENTATION: such secrets only update with force).
            if attrs.get('Labels', {}).get('ansible_key'):
                if attrs['Labels']['ansible_key'] != self.data_key:
                    data_changed = True
            # 'allow_more_present': labels existing on the secret but absent
            # from the requested set do not count as a change.
            labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
            if data_changed or labels_changed or self.force:
                # if something changed or force, delete and re-create the secret
                self.absent()
                secret_id = self.create_secret()
                self.results['changed'] = True
                self.results['secret_id'] = secret_id
        else:
            # No such secret yet: create it.
            self.results['changed'] = True
            self.results['secret_id'] = self.create_secret()

    def absent(self):
        ''' Handles state == 'absent', removing the secret '''
        secret = self.get_secret()
        if secret:
            try:
                if not self.check_mode:
                    self.client.remove_secret(secret['ID'])
            except APIError as exc:
                self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
            self.results['changed'] = True
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_secret module."""
    client = AnsibleDockerClient(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            data=dict(type='str', no_log=True),
            data_is_b64=dict(type='bool', default=False),
            labels=dict(type='dict'),
            force=dict(type='bool', default=False)
        ),
        supports_check_mode=True,
        # 'data' only makes sense (and is mandatory) when creating.
        required_if=[
            ('state', 'present', ['data'])
        ],
        min_docker_version='2.1.0',
        min_docker_api_version='1.25',
    )

    try:
        # SecretManager mutates this dict in place; its contents become the
        # module result.
        outcome = dict(
            changed=False,
            secret_id=''
        )
        SecretManager(client, outcome)()
        client.module.exit_json(**outcome)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
1
plugins/modules/docker_service.py
Symbolic link
1
plugins/modules/docker_service.py
Symbolic link
@ -0,0 +1 @@
|
||||
docker_compose.py
|
||||
312
plugins/modules/docker_stack.py
Normal file
312
plugins/modules/docker_stack.py
Normal file
@ -0,0 +1,312 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_stack
|
||||
author: "Dario Zanzico (@dariko)"
|
||||
short_description: docker stack module
|
||||
description:
|
||||
- Manage docker stacks using the 'docker stack' command
|
||||
on the target node (see examples).
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Stack name
|
||||
type: str
|
||||
required: yes
|
||||
state:
|
||||
description:
|
||||
- Service state.
|
||||
type: str
|
||||
default: "present"
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
compose:
|
||||
description:
|
||||
- List of compose definitions. Any element may be a string
|
||||
referring to the path of the compose file on the target host
|
||||
or the YAML contents of a compose file nested as dictionary.
|
||||
type: list
|
||||
# elements: raw
|
||||
default: []
|
||||
prune:
|
||||
description:
|
||||
- If true will add the C(--prune) option to the C(docker stack deploy) command.
|
||||
This will have docker remove the services not present in the
|
||||
current stack definition.
|
||||
type: bool
|
||||
default: no
|
||||
with_registry_auth:
|
||||
description:
|
||||
- If true will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
|
||||
This will have docker send registry authentication details to Swarm agents.
|
||||
type: bool
|
||||
default: no
|
||||
resolve_image:
|
||||
description:
|
||||
- If set will add the C(--resolve-image) option to the C(docker stack deploy) command.
|
||||
This will have docker query the registry to resolve image digest and
|
||||
supported platforms. If not set, docker use "always" by default.
|
||||
type: str
|
||||
choices: ["always", "changed", "never"]
|
||||
absent_retries:
|
||||
description:
|
||||
- If C(>0) and I(state) is C(absent) the module will retry up to
|
||||
I(absent_retries) times to delete the stack until all the
|
||||
resources have been effectively deleted.
|
||||
If the last try still reports the stack as not completely
|
||||
removed the module will fail.
|
||||
type: int
|
||||
default: 0
|
||||
absent_retries_interval:
|
||||
description:
|
||||
- Interval in seconds between consecutive I(absent_retries).
|
||||
type: int
|
||||
default: 1
|
||||
|
||||
requirements:
|
||||
- jsondiff
|
||||
- pyyaml
|
||||
|
||||
notes:
|
||||
- Return values I(out) and I(err) have been deprecated and will be removed in Ansible 2.14. Use I(stdout) and I(stderr) instead.
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
stack_spec_diff:
|
||||
description: |
|
||||
dictionary containing the differences between the 'Spec' field
|
||||
of the stack services before and after applying the new stack
|
||||
definition.
|
||||
sample: >
|
||||
"stack_spec_diff":
|
||||
{'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
|
||||
returned: on change
|
||||
type: dict
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Deploy stack from a compose file
|
||||
docker_stack:
|
||||
state: present
|
||||
name: mystack
|
||||
compose:
|
||||
- /opt/docker-compose.yml
|
||||
|
||||
- name: Deploy stack from base compose file and override the web service
|
||||
docker_stack:
|
||||
state: present
|
||||
name: mystack
|
||||
compose:
|
||||
- /opt/docker-compose.yml
|
||||
- version: '3'
|
||||
services:
|
||||
web:
|
||||
image: nginx:latest
|
||||
environment:
|
||||
ENVVAR: envvar
|
||||
|
||||
- name: Remove stack
|
||||
docker_stack:
|
||||
name: mystack
|
||||
state: absent
|
||||
'''
|
||||
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
from ansible.module_utils.six import string_types
|
||||
from time import sleep
|
||||
|
||||
try:
|
||||
from jsondiff import diff as json_diff
|
||||
HAS_JSONDIFF = True
|
||||
except ImportError:
|
||||
HAS_JSONDIFF = False
|
||||
|
||||
try:
|
||||
from yaml import dump as yaml_dump
|
||||
HAS_YAML = True
|
||||
except ImportError:
|
||||
HAS_YAML = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, os
|
||||
|
||||
|
||||
def docker_stack_services(module, stack_name):
    """Return the service names belonging to *stack_name* via the docker CLI."""
    docker_bin = module.get_bin_path('docker', required=True)
    cli = [docker_bin, "stack", "services", stack_name, "--format", "{{.Name}}"]
    rc, out, err = module.run_command(cli)
    # An empty/unknown stack is reported on stderr, not via the exit code.
    if err == "Nothing found in stack: %s\n" % stack_name:
        return []
    return out.strip().split('\n')
|
||||
|
||||
|
||||
def docker_service_inspect(module, service_name):
    """Return the 'Spec' of one service, or None when inspect fails."""
    docker_bin = module.get_bin_path('docker', required=True)
    rc, out, err = module.run_command([docker_bin, "service", "inspect", service_name])
    if rc != 0:
        return None
    # 'docker service inspect' prints a one-element JSON array.
    return json.loads(out)[0]['Spec']
|
||||
|
||||
|
||||
def docker_stack_deploy(module, stack_name, compose_files):
    """Run 'docker stack deploy' with the options selected in module params.

    Returns the (rc, stdout, stderr) triple from run_command.
    """
    docker_bin = module.get_bin_path('docker', required=True)
    command = [docker_bin, "stack", "deploy"]
    if module.params["prune"]:
        command.append("--prune")
    if module.params["with_registry_auth"]:
        command.append("--with-registry-auth")
    if module.params["resolve_image"]:
        command.extend(["--resolve-image", module.params["resolve_image"]])
    for compose_file in compose_files:
        command.extend(["--compose-file", compose_file])
    command.append(stack_name)
    return module.run_command(command)
|
||||
|
||||
|
||||
def docker_stack_inspect(module, stack_name):
    """Map each service name of *stack_name* to its current 'Spec'."""
    return dict(
        (service_name, docker_service_inspect(module, service_name))
        for service_name in docker_stack_services(module, stack_name)
    )
|
||||
|
||||
|
||||
def docker_stack_rm(module, stack_name, retries, interval):
    """Run 'docker stack rm', retrying until the stack is reported gone.

    Retries up to *retries* times, sleeping *interval* seconds between
    attempts, as long as the CLI does not yet report the stack as empty.
    Returns the (rc, stdout, stderr) triple of the last attempt.
    """
    docker_bin = module.get_bin_path('docker', required=True)
    command = [docker_bin, "stack", "rm", stack_name]
    gone_marker = "Nothing found in stack: %s\n" % stack_name

    rc, out, err = module.run_command(command)

    while err != gone_marker and retries > 0:
        sleep(interval)
        retries -= 1
        rc, out, err = module.run_command(command)
    return rc, out, err
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_stack module.

    state=present deploys (or updates) the stack from the given compose
    definitions and reports a diff of the service specs; state=absent
    removes the stack, optionally retrying until it is fully gone.
    """
    module = AnsibleModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'compose': dict(type='list', elements='raw', default=[]),
            'prune': dict(type='bool', default=False),
            'with_registry_auth': dict(type='bool', default=False),
            'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
            'state': dict(type='str', default='present', choices=['present', 'absent']),
            'absent_retries': dict(type='int', default=0),
            'absent_retries_interval': dict(type='int', default=1)
        },
        supports_check_mode=False
    )

    if not HAS_JSONDIFF:
        return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")

    if not HAS_YAML:
        return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")

    state = module.params['state']
    compose = module.params['compose']
    name = module.params['name']
    absent_retries = module.params['absent_retries']
    absent_retries_interval = module.params['absent_retries_interval']

    if state == 'present':
        if not compose:
            module.fail_json(msg=("compose parameter must be a list "
                                  "containing at least one element"))

        # Normalize every compose entry to a file path: inline dict
        # definitions are dumped to temp files that Ansible cleans up.
        compose_files = []
        for i, compose_def in enumerate(compose):
            if isinstance(compose_def, dict):
                compose_file_fd, compose_file = tempfile.mkstemp()
                module.add_cleanup_file(compose_file)
                with os.fdopen(compose_file_fd, 'w') as stack_file:
                    compose_files.append(compose_file)
                    stack_file.write(yaml_dump(compose_def))
            elif isinstance(compose_def, string_types):
                compose_files.append(compose_def)
            else:
                # Fixed: the message was built as "..." + "..." % compose_def,
                # where % bound only to the second literal — the %s was never
                # filled and the interpolation itself raised TypeError.
                module.fail_json(msg="compose element '%s' must be a "
                                     "string or a dictionary" % compose_def)

        # Snapshot the service specs before and after deploying so we can
        # report whether anything actually changed.
        before_stack_services = docker_stack_inspect(module, name)

        rc, out, err = docker_stack_deploy(module, name, compose_files)

        after_stack_services = docker_stack_inspect(module, name)

        if rc != 0:
            module.fail_json(msg="docker stack up deploy command failed",
                             rc=rc,
                             out=out, err=err,  # Deprecated
                             stdout=out, stderr=err)

        before_after_differences = json_diff(before_stack_services,
                                             after_stack_services)
        # Fixed: iterate over a list copy — popping from the dict while
        # iterating its keys() view raises RuntimeError on Python 3.
        for k in list(before_after_differences.keys()):
            if isinstance(before_after_differences[k], dict):
                # Docker bumps these on every deploy; drop them so a no-op
                # deploy is not reported as a change.
                before_after_differences[k].pop('UpdatedAt', None)
                before_after_differences[k].pop('Version', None)
                if not list(before_after_differences[k].keys()):
                    before_after_differences.pop(k)

        if not before_after_differences:
            module.exit_json(
                changed=False,
                rc=rc,
                stdout=out,
                stderr=err)
        else:
            module.exit_json(
                changed=True,
                rc=rc,
                stdout=out,
                stderr=err,
                stack_spec_diff=json_diff(before_stack_services,
                                          after_stack_services,
                                          dump=True))

    else:
        if docker_stack_services(module, name):
            rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
            if rc != 0:
                module.fail_json(msg="'docker stack down' command failed",
                                 rc=rc,
                                 out=out, err=err,  # Deprecated
                                 stdout=out, stderr=err)
            else:
                module.exit_json(changed=True,
                                 msg=out, rc=rc,
                                 err=err,  # Deprecated
                                 stdout=out, stderr=err)
        module.exit_json(changed=False)
|
||||
679
plugins/modules/docker_swarm.py
Normal file
679
plugins/modules/docker_swarm.py
Normal file
@ -0,0 +1,679 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_swarm
|
||||
short_description: Manage Swarm cluster
|
||||
description:
|
||||
- Create a new Swarm cluster.
|
||||
- Add/Remove nodes or managers to an existing cluster.
|
||||
options:
|
||||
advertise_addr:
|
||||
description:
|
||||
- Externally reachable address advertised to other nodes.
|
||||
- This can either be an address/port combination
|
||||
in the form C(192.168.1.1:4567), or an interface followed by a
|
||||
port number, like C(eth0:4567).
|
||||
- If the port number is omitted,
|
||||
the port number from the listen address is used.
|
||||
- If I(advertise_addr) is not specified, it will be automatically
|
||||
detected when possible.
|
||||
- Only used when swarm is initialised or joined. Because of this it's not
|
||||
considered for idempotency checking.
|
||||
type: str
|
||||
default_addr_pool:
|
||||
description:
|
||||
- Default address pool in CIDR format.
|
||||
- Only used when swarm is initialised. Because of this it's not considered
|
||||
for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: list
|
||||
elements: str
|
||||
subnet_size:
|
||||
description:
|
||||
- Default address pool subnet mask length.
|
||||
- Only used when swarm is initialised. Because of this it's not considered
|
||||
for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: int
|
||||
listen_addr:
|
||||
description:
|
||||
- Listen address used for inter-manager communication.
|
||||
- This can either be an address/port combination in the form
|
||||
C(192.168.1.1:4567), or an interface followed by a port number,
|
||||
like C(eth0:4567).
|
||||
- If the port number is omitted, the default swarm listening port
|
||||
is used.
|
||||
- Only used when swarm is initialised or joined. Because of this it's not
|
||||
considered for idempotency checking.
|
||||
type: str
|
||||
default: 0.0.0.0:2377
|
||||
force:
|
||||
description:
|
||||
- Use with state C(present) to force creating a new Swarm, even if already part of one.
|
||||
- Use with state C(absent) to Leave the swarm even if this node is a manager.
|
||||
type: bool
|
||||
default: no
|
||||
state:
|
||||
description:
|
||||
- Set to C(present), to create/update a new cluster.
|
||||
- Set to C(join), to join an existing cluster.
|
||||
- Set to C(absent), to leave an existing cluster.
|
||||
- Set to C(remove), to remove an absent node from the cluster.
|
||||
Note that removing requires Docker SDK for Python >= 2.4.0.
|
||||
- Set to C(inspect) to display swarm information.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- present
|
||||
- join
|
||||
- absent
|
||||
- remove
|
||||
- inspect
|
||||
node_id:
|
||||
description:
|
||||
- Swarm id of the node to remove.
|
||||
- Used with I(state=remove).
|
||||
type: str
|
||||
join_token:
|
||||
description:
|
||||
- Swarm token used to join a swarm cluster.
|
||||
- Used with I(state=join).
|
||||
type: str
|
||||
remote_addrs:
|
||||
description:
|
||||
- Remote address of one or more manager nodes of an existing Swarm to connect to.
|
||||
- Used with I(state=join).
|
||||
type: list
|
||||
elements: str
|
||||
task_history_retention_limit:
|
||||
description:
|
||||
- Maximum number of tasks history stored.
|
||||
- Docker default value is C(5).
|
||||
type: int
|
||||
snapshot_interval:
|
||||
description:
|
||||
- Number of logs entries between snapshot.
|
||||
- Docker default value is C(10000).
|
||||
type: int
|
||||
keep_old_snapshots:
|
||||
description:
|
||||
- Number of snapshots to keep beyond the current snapshot.
|
||||
- Docker default value is C(0).
|
||||
type: int
|
||||
log_entries_for_slow_followers:
|
||||
description:
|
||||
- Number of log entries to keep around to sync up slow followers after a snapshot is created.
|
||||
type: int
|
||||
heartbeat_tick:
|
||||
description:
|
||||
- Amount of ticks (in seconds) between each heartbeat.
|
||||
- Docker default value is C(1s).
|
||||
type: int
|
||||
election_tick:
|
||||
description:
|
||||
- Amount of ticks (in seconds) needed without a leader to trigger a new election.
|
||||
- Docker default value is C(10s).
|
||||
type: int
|
||||
dispatcher_heartbeat_period:
|
||||
description:
|
||||
- The delay for an agent to send a heartbeat to the dispatcher.
|
||||
- Docker default value is C(5s).
|
||||
type: int
|
||||
node_cert_expiry:
|
||||
description:
|
||||
- Automatic expiry for nodes certificates.
|
||||
- Docker default value is C(3months).
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- The name of the swarm.
|
||||
type: str
|
||||
labels:
|
||||
description:
|
||||
- User-defined key/value metadata.
|
||||
- Label operations in this module apply to the docker swarm cluster.
|
||||
Use M(docker_node) module to add/modify/remove swarm node labels.
|
||||
- Requires API version >= 1.32.
|
||||
type: dict
|
||||
signing_ca_cert:
|
||||
description:
|
||||
- The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a certificate, but the contents of the certificate.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
signing_ca_key:
|
||||
description:
|
||||
- The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a key, but the contents of the key.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
ca_force_rotate:
|
||||
description:
|
||||
- An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
|
||||
if none have been specified.
|
||||
- Docker default value is C(0).
|
||||
- Requires API version >= 1.30.
|
||||
type: int
|
||||
autolock_managers:
|
||||
description:
|
||||
- If set, generate a key and use it to lock data stored on the managers.
|
||||
- Docker default value is C(no).
|
||||
- M(docker_swarm_info) can be used to retrieve the unlock key.
|
||||
type: bool
|
||||
rotate_worker_token:
|
||||
description: Rotate the worker join token.
|
||||
type: bool
|
||||
default: no
|
||||
rotate_manager_token:
|
||||
description: Rotate the manager join token.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- Docker API >= 1.25
|
||||
author:
|
||||
- Thierry Bouvet (@tbouvet)
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Init a new swarm with default parameters
|
||||
docker_swarm:
|
||||
state: present
|
||||
|
||||
- name: Update swarm configuration
|
||||
docker_swarm:
|
||||
state: present
|
||||
election_tick: 5
|
||||
|
||||
- name: Add nodes
|
||||
docker_swarm:
|
||||
state: join
|
||||
advertise_addr: 192.168.1.2
|
||||
join_token: SWMTKN-1--xxxxx
|
||||
remote_addrs: [ '192.168.1.1:2377' ]
|
||||
|
||||
- name: Leave swarm for a node
|
||||
docker_swarm:
|
||||
state: absent
|
||||
|
||||
- name: Remove a swarm manager
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
|
||||
- name: Remove node from swarm
|
||||
docker_swarm:
|
||||
state: remove
|
||||
node_id: mynode
|
||||
|
||||
- name: Inspect swarm
|
||||
docker_swarm:
|
||||
state: inspect
|
||||
register: swarm_info
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
swarm_facts:
|
||||
description: Information about the swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
JoinTokens:
|
||||
description: Tokens to connect to the Swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
Worker:
|
||||
description: Token to create a new *worker* node
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
Manager:
|
||||
description: Token to create a new *manager* node
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
UnlockKey:
|
||||
description: The swarm unlock-key if I(autolock_managers) is C(true).
|
||||
returned: on success if I(autolock_managers) is C(true)
|
||||
and swarm is initialised, or if I(autolock_managers) has changed.
|
||||
type: str
|
||||
example: SWMKEY-1-xxx
|
||||
|
||||
actions:
|
||||
description: Provides the actions done on the swarm.
|
||||
returned: when action failed.
|
||||
type: list
|
||||
elements: str
|
||||
example: "['This cluster is already a swarm cluster']"
|
||||
|
||||
'''
|
||||
|
||||
import json
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
DifferenceTracker,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
    """Holds the swarm-related module parameters.

    Instances start with every option set to ``None``; values are filled in
    either from the Ansible module parameters (``from_ansible_params``) or
    from the state of an existing swarm (``update_from_swarm_info``).  The
    latter lets ``compare_to_active`` perform the idempotency comparison
    against a running cluster.
    """

    def __init__(self):
        super(TaskParameters, self).__init__()

        # Options that only matter when a swarm is initialised or joined;
        # they are deliberately excluded from idempotency checks.
        self.advertise_addr = None
        self.listen_addr = None
        self.remote_addrs = None
        self.join_token = None

        # Spec: options that map onto the docker SwarmSpec and therefore
        # take part in the comparison against the active swarm.
        self.snapshot_interval = None
        self.task_history_retention_limit = None
        self.keep_old_snapshots = None
        self.log_entries_for_slow_followers = None
        self.heartbeat_tick = None
        self.election_tick = None
        self.dispatcher_heartbeat_period = None
        self.node_cert_expiry = None
        self.name = None
        self.labels = None
        self.log_driver = None
        self.signing_ca_cert = None
        self.signing_ca_key = None
        self.ca_force_rotate = None
        self.autolock_managers = None
        self.rotate_worker_token = None
        self.rotate_manager_token = None
        self.default_addr_pool = None
        self.subnet_size = None

    @staticmethod
    def from_ansible_params(client):
        """Build a TaskParameters from the module's parameters.

        Only parameters whose names match an attribute initialised in
        ``__init__`` are copied over; everything else (e.g. connection
        options) is ignored.  Also pre-builds the swarm spec.
        """
        result = TaskParameters()
        for key, value in client.module.params.items():
            # Copy only known swarm options; unrelated module params are skipped.
            if key in result.__dict__:
                setattr(result, key, value)

        result.update_parameters(client)
        return result

    def update_from_swarm_info(self, swarm_info):
        """Fill unset (None) attributes from an existing swarm's Spec.

        ``swarm_info`` is the dict returned by the Docker API's swarm
        inspect call.  User-supplied values always win: each attribute is
        only read from the swarm when it is still ``None``.
        """
        spec = swarm_info['Spec']

        # `or dict()` guards against the API returning an explicit null
        # for a sub-section instead of omitting it.
        ca_config = spec.get('CAConfig') or dict()
        if self.node_cert_expiry is None:
            self.node_cert_expiry = ca_config.get('NodeCertExpiry')
        if self.ca_force_rotate is None:
            self.ca_force_rotate = ca_config.get('ForceRotate')

        dispatcher = spec.get('Dispatcher') or dict()
        if self.dispatcher_heartbeat_period is None:
            self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')

        raft = spec.get('Raft') or dict()
        if self.snapshot_interval is None:
            self.snapshot_interval = raft.get('SnapshotInterval')
        if self.keep_old_snapshots is None:
            self.keep_old_snapshots = raft.get('KeepOldSnapshots')
        if self.heartbeat_tick is None:
            self.heartbeat_tick = raft.get('HeartbeatTick')
        if self.log_entries_for_slow_followers is None:
            self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
        if self.election_tick is None:
            self.election_tick = raft.get('ElectionTick')

        orchestration = spec.get('Orchestration') or dict()
        if self.task_history_retention_limit is None:
            self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')

        encryption_config = spec.get('EncryptionConfig') or dict()
        if self.autolock_managers is None:
            self.autolock_managers = encryption_config.get('AutoLockManagers')

        if self.name is None:
            self.name = spec['Name']

        if self.labels is None:
            self.labels = spec.get('Labels') or {}

        # NOTE(review): unlike the attributes above, log_driver is taken
        # from the swarm unconditionally (it is not a module option, so it
        # can never have been set by the user at this point).
        if 'LogDriver' in spec['TaskDefaults']:
            self.log_driver = spec['TaskDefaults']['LogDriver']

    def update_parameters(self, client):
        """Create the docker-py SwarmSpec from the current attribute values.

        Maps attribute names to create_swarm_spec() keyword arguments
        (identity mapping here, kept explicit for clarity), skipping
        options the installed Docker SDK / API version does not support,
        and stores the result in ``self.spec``.
        """
        assign = dict(
            snapshot_interval='snapshot_interval',
            task_history_retention_limit='task_history_retention_limit',
            keep_old_snapshots='keep_old_snapshots',
            log_entries_for_slow_followers='log_entries_for_slow_followers',
            heartbeat_tick='heartbeat_tick',
            election_tick='election_tick',
            dispatcher_heartbeat_period='dispatcher_heartbeat_period',
            node_cert_expiry='node_cert_expiry',
            name='name',
            labels='labels',
            signing_ca_cert='signing_ca_cert',
            signing_ca_key='signing_ca_key',
            ca_force_rotate='ca_force_rotate',
            autolock_managers='autolock_managers',
            log_driver='log_driver',
        )
        params = dict()
        for dest, source in assign.items():
            # Skip options that need a newer SDK/API than is available.
            if not client.option_minimal_versions[source]['supported']:
                continue
            value = getattr(self, source)
            if value is not None:
                params[dest] = value
        self.spec = client.create_swarm_spec(**params)

    def compare_to_active(self, other, client, differences):
        """Record differences between desired and active swarm parameters.

        ``other`` is a TaskParameters populated from the running swarm;
        ``differences`` is a DifferenceTracker that is mutated in place and
        also returned.  Init/join-only options and the derived ``spec`` are
        excluded, as are options unsupported by the installed SDK/API and
        options the user did not set (None).
        """
        for k in self.__dict__:
            if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
                     'rotate_worker_token', 'rotate_manager_token', 'spec',
                     'default_addr_pool', 'subnet_size'):
                continue
            if not client.option_minimal_versions[k]['supported']:
                continue
            value = getattr(self, k)
            if value is None:
                continue
            other_value = getattr(other, k)
            if value != other_value:
                differences.add(k, parameter=value, active=other_value)
        # Token rotation is always reported as a change when requested,
        # since there is no "active" value to compare against.
        if self.rotate_worker_token:
            differences.add('rotate_worker_token', parameter=True, active=False)
        if self.rotate_manager_token:
            differences.add('rotate_manager_token', parameter=True, active=False)
        return differences
|
||||
|
||||
|
||||
class SwarmManager(DockerBaseClass):
    """Executes the requested swarm operation and fills the result dict.

    Instantiated with the swarm client and a shared ``results`` dict; the
    instance is then *called* to dispatch on the module's ``state``
    parameter (present/join/absent/remove/inspect).  All outcomes are
    written into ``results`` in place.
    """

    def __init__(self, client, results):

        super(SwarmManager, self).__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode
        # Filled by inspect_swarm() with the swarm inspect output.
        self.swarm_info = {}

        self.state = client.module.params['state']
        self.force = client.module.params['force']
        self.node_id = client.module.params['node_id']

        self.differences = DifferenceTracker()
        self.parameters = TaskParameters.from_ansible_params(client)

        # Set to True by init_swarm() when a new cluster was created;
        # used to decide whether the unlock key must be reported.
        self.created = False

    def __call__(self):
        """Dispatch to the handler matching the requested state."""
        choice_map = {
            "present": self.init_swarm,
            "join": self.join,
            "absent": self.leave,
            "remove": self.remove,
            "inspect": self.inspect_swarm
        }

        if self.state == 'inspect':
            self.client.module.deprecate(
                "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
                version='2.12')

        choice_map.get(self.state)()

        # Report the before/after view when diff mode or debug is active.
        if self.client.module._diff or self.parameters.debug:
            diff = dict()
            diff['before'], diff['after'] = self.differences.get_before_after()
            self.results['diff'] = diff

    def inspect_swarm(self):
        """Read the swarm state into ``swarm_info`` and the results.

        API errors are deliberately swallowed (e.g. when the node is not
        part of a swarm); callers must cope with ``swarm_info`` staying
        empty in that case.
        """
        try:
            data = self.client.inspect_swarm()
            # Round-trip through JSON to get a plain, deep-copied dict.
            json_str = json.dumps(data, ensure_ascii=False)
            self.swarm_info = json.loads(json_str)

            self.results['changed'] = False
            self.results['swarm_facts'] = self.swarm_info

            # Note: swarm_facts references the same dict, so the unlock
            # key added here becomes visible in the results as well.
            unlock_key = self.get_unlock_key()
            self.swarm_info.update(unlock_key)
        except APIError:
            return

    def get_unlock_key(self):
        """Return ``{'UnlockKey': ...}``; key is None unless it changed.

        The key is only fetched when autolock is enabled and the swarm was
        just created or the autolock setting changed, to avoid exposing it
        unnecessarily.
        """
        default = {'UnlockKey': None}
        if not self.has_swarm_lock_changed():
            return default
        try:
            return self.client.get_unlock_key() or default
        except APIError:
            return default

    def has_swarm_lock_changed(self):
        # True when autolock is requested and either the swarm was just
        # created or the autolock_managers option differs from the
        # active swarm.
        return self.parameters.autolock_managers and (
            self.created or self.differences.has_difference_for('autolock_managers')
        )

    def init_swarm(self):
        """Handle state=present: create a new swarm or update an existing one."""
        # Already a manager and no force: fall through to an in-place update.
        if not self.force and self.client.check_if_swarm_manager():
            self.__update_swarm()
            return

        if not self.check_mode:
            init_arguments = {
                'advertise_addr': self.parameters.advertise_addr,
                'listen_addr': self.parameters.listen_addr,
                'force_new_cluster': self.force,
                'swarm_spec': self.parameters.spec,
            }
            # Only pass these when set; older SDKs reject the kwargs.
            if self.parameters.default_addr_pool is not None:
                init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
            if self.parameters.subnet_size is not None:
                init_arguments['subnet_size'] = self.parameters.subnet_size
            try:
                self.client.init_swarm(**init_arguments)
            except APIError as exc:
                self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))

        # Verify the node actually became a manager (skipped in check
        # mode, where init_swarm was never called).
        if not self.client.check_if_swarm_manager():
            if not self.check_mode:
                self.client.fail("Swarm not created or other error!")

        self.created = True
        self.inspect_swarm()
        self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
        self.differences.add('state', parameter='present', active='absent')
        self.results['changed'] = True
        self.results['swarm_facts'] = {
            'JoinTokens': self.swarm_info.get('JoinTokens'),
            'UnlockKey': self.swarm_info.get('UnlockKey')
        }

    def __update_swarm(self):
        """Compare desired vs. active swarm spec and update if needed."""
        try:
            self.inspect_swarm()
            version = self.swarm_info['Version']['Index']
            # Merge active swarm values into unset parameters so the
            # comparison below only flags options the user actually set.
            self.parameters.update_from_swarm_info(self.swarm_info)
            old_parameters = TaskParameters()
            old_parameters.update_from_swarm_info(self.swarm_info)
            self.parameters.compare_to_active(old_parameters, self.client, self.differences)
            if self.differences.empty:
                self.results['actions'].append("No modification")
                self.results['changed'] = False
                return
            # Rebuild the spec from the raw module params (not the merged
            # ones) so only user-provided values are sent in the update.
            update_parameters = TaskParameters.from_ansible_params(self.client)
            update_parameters.update_parameters(self.client)
            if not self.check_mode:
                self.client.update_swarm(
                    version=version, swarm_spec=update_parameters.spec,
                    rotate_worker_token=self.parameters.rotate_worker_token,
                    rotate_manager_token=self.parameters.rotate_manager_token)
        except APIError as exc:
            self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
            return

        self.inspect_swarm()
        self.results['actions'].append("Swarm cluster updated")
        self.results['changed'] = True

    def join(self):
        """Handle state=join: add this node to an existing swarm."""
        # Idempotency: nothing to do if the node is already in a swarm.
        if self.client.check_if_swarm_node():
            self.results['actions'].append("This node is already part of a swarm.")
            return
        if not self.check_mode:
            try:
                self.client.join_swarm(
                    remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
                    listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
            except APIError as exc:
                self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("New node is added to swarm cluster")
        self.differences.add('joined', parameter=True, active=False)
        self.results['changed'] = True

    def leave(self):
        """Handle state=absent: make this node leave the swarm."""
        # Idempotency: nothing to do if the node is not in a swarm.
        if not self.client.check_if_swarm_node():
            self.results['actions'].append("This node is not part of a swarm.")
            return
        if not self.check_mode:
            try:
                self.client.leave_swarm(force=self.force)
            except APIError as exc:
                self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("Node has left the swarm cluster")
        self.differences.add('joined', parameter='absent', active='present')
        self.results['changed'] = True

    def remove(self):
        """Handle state=remove: remove node ``node_id`` from the swarm.

        Must run on a manager; the target node has to be down first
        (checked with retries), otherwise the module fails.
        """
        if not self.client.check_if_swarm_manager():
            self.client.fail("This node is not a manager.")

        try:
            status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
        except APIError:
            # NOTE(review): API errors here are silently ignored and the
            # module reports no change — presumably treats the node as
            # already gone; confirm against swarm helper semantics.
            return

        if not status_down:
            self.client.fail("Can not remove the node. The status node is ready and not down.")

        if not self.check_mode:
            try:
                self.client.remove_node(node_id=self.node_id, force=self.force)
            except APIError as exc:
                self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("Node is removed from swarm cluster.")
        self.differences.add('joined', parameter=False, active=True)
        self.results['changed'] = True
|
||||
|
||||
|
||||
def _detect_remove_operation(client):
    """Tell whether this module run is a node-removal operation.

    Used as a ``detect_usage`` callback for the minimal-version check of
    the remove feature (requires Docker SDK for Python >= 2.4.0).
    """
    requested_state = client.module.params['state']
    return requested_state == 'remove'
|
||||
|
||||
|
||||
def main():
    """Entry point: parse parameters, run SwarmManager, exit with results."""
    argument_spec = dict(
        advertise_addr=dict(type='str'),
        state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
        force=dict(type='bool', default=False),
        listen_addr=dict(type='str', default='0.0.0.0:2377'),
        remote_addrs=dict(type='list', elements='str'),
        join_token=dict(type='str'),
        snapshot_interval=dict(type='int'),
        task_history_retention_limit=dict(type='int'),
        keep_old_snapshots=dict(type='int'),
        log_entries_for_slow_followers=dict(type='int'),
        heartbeat_tick=dict(type='int'),
        election_tick=dict(type='int'),
        dispatcher_heartbeat_period=dict(type='int'),
        node_cert_expiry=dict(type='int'),
        name=dict(type='str'),
        labels=dict(type='dict'),
        signing_ca_cert=dict(type='str'),
        signing_ca_key=dict(type='str'),
        ca_force_rotate=dict(type='int'),
        autolock_managers=dict(type='bool'),
        node_id=dict(type='str'),
        rotate_worker_token=dict(type='bool', default=False),
        rotate_manager_token=dict(type='bool', default=False),
        default_addr_pool=dict(type='list', elements='str'),
        subnet_size=dict(type='int'),
    )

    # Extra parameters required for specific states.
    required_if = [
        ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']),
        ('state', 'remove', ['node_id'])
    ]

    # Minimum Docker SDK / API versions per option; options below these
    # versions are rejected (or, with detect_usage, only when used).
    option_minimal_versions = dict(
        labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
        signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        autolock_managers=dict(docker_py_version='2.6.0'),
        log_driver=dict(docker_py_version='2.6.0'),
        remove_operation=dict(
            docker_py_version='2.4.0',
            detect_usage=_detect_remove_operation,
            usage_msg='remove swarm nodes'
        ),
        default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
        subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
        min_docker_version='1.10.0',
        min_docker_api_version='1.25',
        option_minimal_versions=option_minimal_versions,
    )

    try:
        results = dict(
            changed=False,
            result='',
            actions=[]
        )

        # SwarmManager mutates `results` in place; calling the instance
        # dispatches on the requested state.
        SwarmManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
||||
388
plugins/modules/docker_swarm_info.py
Normal file
388
plugins/modules/docker_swarm_info.py
Normal file
@ -0,0 +1,388 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_swarm_info
|
||||
|
||||
short_description: Retrieves facts about Docker Swarm cluster.
|
||||
|
||||
description:
|
||||
- Retrieves facts about a Docker Swarm.
|
||||
- Returns lists of swarm objects names for the services - nodes, services, tasks.
|
||||
- The output differs depending on API version available on docker host.
|
||||
- Must be run on Swarm Manager node; otherwise module fails with error message.
|
||||
It does return boolean flags on both error and success which indicate whether
|
||||
the docker daemon can be communicated with, whether it is in Swarm mode, and
|
||||
whether it is a Swarm Manager node.
|
||||
|
||||
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
|
||||
options:
|
||||
nodes:
|
||||
description:
|
||||
- Whether to list swarm nodes.
|
||||
type: bool
|
||||
default: no
|
||||
nodes_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting nodes to list.
|
||||
- "For example, C(name: mynode)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
services:
|
||||
description:
|
||||
- Whether to list swarm services.
|
||||
type: bool
|
||||
default: no
|
||||
services_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting services to list.
|
||||
- "For example, C(name: myservice)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
tasks:
|
||||
description:
|
||||
- Whether to list containers.
|
||||
type: bool
|
||||
default: no
|
||||
tasks_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting tasks to list.
|
||||
- "For example, C(node: mynode-1)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
unlock_key:
|
||||
description:
|
||||
- Whether to retrieve the swarm unlock key.
|
||||
type: bool
|
||||
default: no
|
||||
verbose_output:
|
||||
description:
|
||||
- When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), then the module output will
|
||||
contain verbose information about objects matching the full output of API method.
|
||||
- For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
|
||||
- The verbose output in this module contains only subset of information returned by I(_info) module
|
||||
for each type of the objects.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.24"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info on Docker Swarm
|
||||
docker_swarm_info:
|
||||
ignore_errors: yes
|
||||
register: result
|
||||
|
||||
- name: Inform about basic flags
|
||||
debug:
|
||||
msg: |
|
||||
Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
|
||||
Docker in Swarm mode: {{ result.docker_swarm_active }}
|
||||
This is a Manager node: {{ result.docker_swarm_manager }}
|
||||
|
||||
- block:
|
||||
|
||||
- name: Get info on Docker Swarm and list of registered nodes
|
||||
docker_swarm_info:
|
||||
nodes: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on Docker Swarm and extended list of registered nodes
|
||||
docker_swarm_info:
|
||||
nodes: yes
|
||||
verbose_output: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on Docker Swarm and filtered list of registered nodes
|
||||
docker_swarm_info:
|
||||
nodes: yes
|
||||
nodes_filter:
|
||||
name: mynode
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
var: result.swarm_facts
|
||||
|
||||
- name: Get the swarm unlock key
|
||||
docker_swarm_info:
|
||||
unlock_key: yes
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
var: result.swarm_unlock_key
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
can_talk_to_docker:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
docker_swarm_active:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon,
|
||||
and the docker daemon is in Swarm mode.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
docker_swarm_manager:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon,
|
||||
the docker daemon is in Swarm mode, and the current node is
|
||||
a manager node.
|
||||
- Only if this one is C(true), the module will not fail.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
swarm_facts:
|
||||
description:
|
||||
- Facts representing the basic state of the docker Swarm cluster.
|
||||
- Contains tokens to connect to the Swarm
|
||||
returned: always
|
||||
type: dict
|
||||
swarm_unlock_key:
|
||||
description:
|
||||
- Contains the key needed to unlock the swarm.
|
||||
returned: When I(unlock_key) is C(true).
|
||||
type: str
|
||||
nodes:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each node.
|
||||
Keys match the C(docker node ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(nodes) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
services:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each service.
|
||||
Keys match the C(docker service ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(services) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
tasks:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each task.
|
||||
Keys match the C(docker service ps) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(tasks) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker_common
|
||||
pass
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
clean_dict_booleans_for_docker_api,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
class DockerSwarmManager(DockerBaseClass):
    """Collect Docker Swarm facts into the shared results dict.

    Instantiating the class does all of the work: it verifies that the
    target daemon is a Swarm manager, gathers the basic Swarm facts, and
    — depending on the module parameters — lists nodes, services, tasks
    and the unlock key.  Everything is written into the ``results`` dict
    supplied by the caller (which main() later passes to exit_json()).
    """

    def __init__(self, client, results):
        # client: AnsibleDockerSwarmClient used for all daemon calls.
        # results: dict mutated in place with the gathered facts.

        super(DockerSwarmManager, self).__init__()

        self.client = client
        self.results = results
        self.verbose_output = self.client.module.params['verbose_output']

        listed_objects = ['tasks', 'services', 'nodes']

        # Listing swarm objects only works on a manager node; fail early.
        self.client.fail_task_if_not_swarm_manager()

        self.results['swarm_facts'] = self.get_docker_swarm_facts()

        for docker_object in listed_objects:
            if self.client.module.params[docker_object]:
                returned_name = docker_object
                # Each listing option has a matching '<name>_filters' option.
                filter_name = docker_object + "_filters"
                filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
                self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
        if self.client.module.params['unlock_key']:
            self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()

    def get_docker_swarm_facts(self):
        """Return the raw ``docker swarm inspect`` data as a dict."""
        try:
            return self.client.inspect_swarm()
        except APIError as exc:
            self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))

    def get_docker_items_list(self, docker_object=None, filters=None):
        """List swarm objects of one kind ('nodes', 'tasks' or 'services').

        Returns the raw API objects when verbose_output is enabled,
        otherwise a list of reduced dicts built by the matching
        get_essential_facts_* helper.
        """
        items = None
        items_list = []

        try:
            if docker_object == 'nodes':
                items = self.client.nodes(filters=filters)
            elif docker_object == 'tasks':
                items = self.client.tasks(filters=filters)
            elif docker_object == 'services':
                items = self.client.services(filters=filters)
        except APIError as exc:
            self.client.fail("Error inspecting docker swarm for object '%s': %s" %
                             (docker_object, to_native(exc)))

        if self.verbose_output:
            return items

        for item in items:
            item_record = dict()

            if docker_object == 'nodes':
                item_record = self.get_essential_facts_nodes(item)
            elif docker_object == 'tasks':
                item_record = self.get_essential_facts_tasks(item)
            elif docker_object == 'services':
                item_record = self.get_essential_facts_services(item)
                if item_record['Mode'] == 'Global':
                    # NOTE(review): for a 'Global' service this sets Replicas to
                    # the number of *services* listed, not the number of task
                    # replicas — looks questionable, but preserved as-is.
                    item_record['Replicas'] = len(items)
            items_list.append(item_record)

        return items_list

    @staticmethod
    def get_essential_facts_nodes(item):
        """Reduce one raw node object to the fields shown by ``docker node ls``."""
        object_essentials = dict()

        object_essentials['ID'] = item.get('ID')
        object_essentials['Hostname'] = item['Description']['Hostname']
        object_essentials['Status'] = item['Status']['State']
        object_essentials['Availability'] = item['Spec']['Availability']
        if 'ManagerStatus' in item:
            object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
            # The current leader is reported as "Leader" instead of its
            # reachability state.
            if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
                object_essentials['ManagerStatus'] = "Leader"
        else:
            # Worker nodes carry no ManagerStatus section.
            object_essentials['ManagerStatus'] = None
        object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']

        return object_essentials

    def get_essential_facts_tasks(self, item):
        """Reduce one raw task object to the fields shown by ``docker service ps``."""
        object_essentials = dict()

        object_essentials['ID'] = item['ID']
        # Returning container ID to not trigger another connection to host
        # Container ID is sufficient to get extended info in other tasks
        object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
        object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
        object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
        object_essentials['DesiredState'] = item['DesiredState']
        object_essentials['CurrentState'] = item['Status']['State']
        if 'Err' in item['Status']:
            object_essentials['Error'] = item['Status']['Err']
        else:
            object_essentials['Error'] = None

        return object_essentials

    @staticmethod
    def get_essential_facts_services(item):
        """Reduce one raw service object to the fields shown by ``docker service ls``."""
        object_essentials = dict()

        object_essentials['ID'] = item['ID']
        object_essentials['Name'] = item['Spec']['Name']
        if 'Replicated' in item['Spec']['Mode']:
            object_essentials['Mode'] = "Replicated"
            object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
        elif 'Global' in item['Spec']['Mode']:
            object_essentials['Mode'] = "Global"
            # Number of replicas have to be updated in calling method or may be left as None
            object_essentials['Replicas'] = None
        object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
        if 'Ports' in item['Spec']['EndpointSpec']:
            object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
        else:
            object_essentials['Ports'] = []

        return object_essentials

    def get_docker_swarm_unlock_key(self):
        """Return the Swarm unlock key, or None if the API reported none."""
        unlock_key = self.client.get_unlock_key() or {}
        return unlock_key.get('UnlockKey') or None
|
||||
|
||||
|
||||
def main():
    """Module entry point for docker_swarm_info."""
    # Three togglable object listings, each with an optional filters dict,
    # plus the unlock-key and verbosity switches.  All off by default.
    argument_spec = {}
    for kind in ('nodes', 'tasks', 'services'):
        argument_spec[kind] = dict(type='bool', default=False)
        argument_spec[kind + '_filters'] = dict(type='dict')
    argument_spec['unlock_key'] = dict(type='bool', default=False)
    argument_spec['verbose_output'] = dict(type='bool', default=False)

    # Retrieving the unlock key needs a newer SDK/API than the module baseline.
    option_minimal_versions = dict(
        unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='1.10.0',
        min_docker_api_version='1.24',
        option_minimal_versions=option_minimal_versions,
        fail_results=dict(
            can_talk_to_docker=False,
            docker_swarm_active=False,
            docker_swarm_manager=False,
        ),
    )

    # Client construction succeeded, so the daemon is reachable; record the
    # swarm state flags so they are reported even if the module fails later.
    client.fail_results['can_talk_to_docker'] = True
    client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
    client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()

    try:
        results = dict(changed=False)
        # The manager writes all gathered facts into `results` in place.
        DockerSwarmManager(client, results)
        results.update(client.fail_results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
2972
plugins/modules/docker_swarm_service.py
Normal file
2972
plugins/modules/docker_swarm_service.py
Normal file
File diff suppressed because it is too large
Load Diff
121
plugins/modules/docker_swarm_service_info.py
Normal file
121
plugins/modules/docker_swarm_service_info.py
Normal file
@ -0,0 +1,121 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_swarm_service_info
|
||||
|
||||
short_description: Retrieves information about docker services from a Swarm Manager
|
||||
|
||||
description:
|
||||
- Retrieves information about a docker service.
|
||||
- Essentially returns the output of C(docker service inspect <name>).
|
||||
- Must be executed on a host running as Swarm Manager, otherwise the module will fail.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the service to inspect.
|
||||
type: str
|
||||
required: yes
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Hannes Ljungberg (@hannseman)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
|
||||
- "Docker API >= 1.24"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info from a service
|
||||
docker_swarm_service_info:
|
||||
name: myservice
|
||||
register: result
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the service exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
service:
|
||||
description:
|
||||
- A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
|
||||
- Will be C(none) if service does not exist.
|
||||
returned: always
|
||||
type: dict
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
|
||||
def get_service_info(client):
    """Inspect the service named by the module's I(name) parameter.

    skip_missing=True suppresses the not-found failure — presumably the
    helper then returns None for a missing service (confirm in
    module_utils.swarm).
    """
    name = client.module.params['name']
    return client.get_service_inspect(service_id=name, skip_missing=True)
|
||||
|
||||
|
||||
def main():
    """Module entry point for docker_swarm_service_info."""
    client = AnsibleDockerSwarmClient(
        argument_spec=dict(
            name=dict(type='str', required=True),
        ),
        supports_check_mode=True,
        min_docker_version='2.0.0',
        min_docker_api_version='1.24',
    )

    # Service inspection only works against a Swarm manager; fail early.
    client.fail_task_if_not_swarm_manager()

    try:
        service = get_service_info(client)
        client.module.exit_json(
            changed=False,
            service=service,
            exists=bool(service)
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
338
plugins/modules/docker_volume.py
Normal file
338
plugins/modules/docker_volume.py
Normal file
@ -0,0 +1,338 @@
|
||||
#!/usr/bin/python
|
||||
# coding: utf-8
|
||||
#
|
||||
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: docker_volume
|
||||
short_description: Manage Docker volumes
|
||||
description:
|
||||
- Create/remove Docker volumes.
|
||||
- Performs largely the same function as the "docker volume" CLI subcommand.
|
||||
options:
|
||||
volume_name:
|
||||
description:
|
||||
- Name of the volume to operate on.
|
||||
type: str
|
||||
required: yes
|
||||
aliases:
|
||||
- name
|
||||
|
||||
driver:
|
||||
description:
|
||||
- Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
|
||||
type: str
|
||||
default: local
|
||||
|
||||
driver_options:
|
||||
description:
|
||||
- "Dictionary of volume settings. Consult docker docs for valid options and values:
|
||||
U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
|
||||
type: dict
|
||||
|
||||
labels:
|
||||
description:
|
||||
- Dictionary of label key/values to set for the volume
|
||||
type: dict
|
||||
|
||||
force:
|
||||
description:
|
||||
- With state C(present) causes the volume to be deleted and recreated if the volume already
|
||||
exist and the driver, driver options or labels differ. This will cause any data in the existing
|
||||
volume to be lost.
|
||||
- Deprecated. Will be removed in Ansible 2.12. Set I(recreate) to C(options-changed) instead
|
||||
for the same behavior of setting I(force) to C(yes).
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
recreate:
|
||||
description:
|
||||
- Controls when a volume will be recreated when I(state) is C(present). Please
|
||||
note that recreating an existing volume will cause **any data in the existing volume
|
||||
to be lost!** The volume will be deleted and a new volume with the same name will be
|
||||
created.
|
||||
- The value C(always) forces the volume to be always recreated.
|
||||
- The value C(never) makes sure the volume will not be recreated.
|
||||
- The value C(options-changed) makes sure the volume will be recreated if the volume
|
||||
already exist and the driver, driver options or labels differ.
|
||||
type: str
|
||||
default: never
|
||||
choices:
|
||||
- always
|
||||
- never
|
||||
- options-changed
|
||||
|
||||
state:
|
||||
description:
|
||||
- C(absent) deletes the volume.
|
||||
- C(present) creates the volume, if it does not already exist.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Alex Grönholm (@agronholm)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "The docker server >= 1.9.0"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a volume
|
||||
docker_volume:
|
||||
name: volume_one
|
||||
|
||||
- name: Remove a volume
|
||||
docker_volume:
|
||||
name: volume_one
|
||||
state: absent
|
||||
|
||||
- name: Create a volume with options
|
||||
docker_volume:
|
||||
name: volume_two
|
||||
driver_options:
|
||||
type: btrfs
|
||||
device: /dev/sda2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
volume:
|
||||
description:
|
||||
- Volume inspection results for the affected volume.
|
||||
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
|
||||
are also accessible directly as C(docker_volume). Note that the returned fact will be removed in Ansible 2.12.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
AnsibleDockerClient,
|
||||
DifferenceTracker,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils.six import iteritems, text_type
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
    """Typed view of the docker_volume module parameters.

    Copies every module parameter onto an attribute of the same name and
    translates the deprecated I(force) option into the equivalent
    I(recreate) value.
    """

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client

        # Declare the expected attributes; overwritten from the module
        # parameters just below.
        for attr in ('volume_name', 'driver', 'driver_options', 'labels',
                     'force', 'recreate', 'debug'):
            setattr(self, attr, None)

        for key, value in iteritems(client.module.params):
            setattr(self, key, value)

        # 'force' is deprecated in favour of 'recreate'; using both at once
        # is ambiguous and rejected.
        if self.force is not None:
            if self.recreate != 'never':
                client.fail('Cannot use the deprecated "force" '
                            'option when "recreate" is set. Please stop '
                            'using the force option.')
            client.module.warn('The "force" option of docker_volume has been deprecated '
                               'in Ansible 2.8. Please use the "recreate" '
                               'option, which provides the same functionality as "force".')
            self.recreate = 'options-changed' if self.force else 'never'
|
||||
|
||||
|
||||
class DockerVolumeManager(object):
    """Drive a Docker volume towards the requested state.

    All work happens in __init__: the existing volume (if any) is looked
    up, then present()/absent() is dispatched from the 'state' parameter.
    The outcome is collected in self.results for exit_json().
    """

    def __init__(self, client):
        # client: AnsibleDockerClient used for all daemon calls.
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result = dict()

        self.existing_volume = self.get_existing_volume()

        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()

        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff_result

    def get_existing_volume(self):
        """Return the volume dict matching volume_name, or None if absent."""
        try:
            volumes = self.client.volumes()
        except APIError as e:
            self.client.fail(text_type(e))

        # The daemon reports 'Volumes': null when no volumes exist at all.
        if volumes[u'Volumes'] is None:
            return None

        for volume in volumes[u'Volumes']:
            if volume['Name'] == self.parameters.volume_name:
                return volume

        return None

    def has_different_config(self):
        """
        Return the list of differences between the current parameters and the existing volume.

        :return: list of options that differ
        """
        differences = DifferenceTracker()
        if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
            differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
        if self.parameters.driver_options:
            if not self.existing_volume.get('Options'):
                differences.add('driver_options',
                                parameter=self.parameters.driver_options,
                                active=self.existing_volume.get('Options'))
            else:
                for key, value in iteritems(self.parameters.driver_options):
                    if (not self.existing_volume['Options'].get(key) or
                            value != self.existing_volume['Options'][key]):
                        differences.add('driver_options.%s' % key,
                                        parameter=value,
                                        active=self.existing_volume['Options'].get(key))
        if self.parameters.labels:
            # BUGFIX: the daemon reports 'Labels': null (not {}) for a volume
            # without labels, so .get('Labels', {}) would return None and the
            # .get(label) below would raise AttributeError.  Fall back to {}.
            existing_labels = self.existing_volume.get('Labels') or {}
            for label in self.parameters.labels:
                if existing_labels.get(label) != self.parameters.labels.get(label):
                    differences.add('labels.%s' % label,
                                    parameter=self.parameters.labels.get(label),
                                    active=existing_labels.get(label))

        return differences

    def create_volume(self):
        """Create the volume if it does not exist yet (honours check mode)."""
        if not self.existing_volume:
            if not self.check_mode:
                try:
                    params = dict(
                        driver=self.parameters.driver,
                        driver_opts=self.parameters.driver_options,
                    )

                    # Only pass labels when given; the option needs a newer API.
                    if self.parameters.labels is not None:
                        params['labels'] = self.parameters.labels

                    resp = self.client.create_volume(self.parameters.volume_name, **params)
                    self.existing_volume = self.client.inspect_volume(resp['Name'])
                except APIError as e:
                    self.client.fail(text_type(e))

            self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
            self.results['changed'] = True

    def remove_volume(self):
        """Remove the volume if it exists (honours check mode)."""
        if self.existing_volume:
            if not self.check_mode:
                try:
                    self.client.remove_volume(self.parameters.volume_name)
                except APIError as e:
                    self.client.fail(text_type(e))

            self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
            self.results['changed'] = True

    def present(self):
        """Ensure the volume exists, recreating it if the recreate policy demands."""
        differences = DifferenceTracker()
        if self.existing_volume:
            differences = self.has_different_config()

        self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
        # Recreate when forced ('always') or when the configuration differs
        # and the policy is 'options-changed'.  This destroys volume data.
        if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
            self.remove_volume()
            self.existing_volume = None

        self.create_volume()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_result['differences'] = differences.get_legacy_docker_diffs()
            self.diff_tracker.merge(differences)

        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')

        volume_facts = self.get_existing_volume()
        # Facts are kept for backwards compatibility; 'volume' is the
        # canonical return value.
        self.results['ansible_facts'] = {u'docker_volume': volume_facts}
        self.results['volume'] = volume_facts

    def absent(self):
        """Ensure the volume does not exist."""
        self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
        self.remove_volume()
|
||||
|
||||
|
||||
def main():
    """Module entry point: create or remove a Docker volume."""
    argument_spec = dict(
        volume_name=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        driver=dict(type='str', default='local'),
        driver_options=dict(type='dict', default={}),
        labels=dict(type='dict'),
        force=dict(type='bool', removed_in_version='2.12'),
        recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
        debug=dict(type='bool', default=False)
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='1.10.0',
        min_docker_api_version='1.21',
        # "The docker server >= 1.9.0"
        option_minimal_versions=dict(
            # Volume labels need a newer SDK/API than the module baseline.
            labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
        ),
    )

    try:
        manager = DockerVolumeManager(client)
        client.module.exit_json(**manager.results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
133
plugins/modules/docker_volume_info.py
Normal file
133
plugins/modules/docker_volume_info.py
Normal file
@ -0,0 +1,133 @@
|
||||
#!/usr/bin/python
|
||||
# coding: utf-8
|
||||
#
|
||||
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: docker_volume_info
|
||||
short_description: Retrieve facts about Docker volumes
|
||||
description:
|
||||
- Performs largely the same function as the "docker volume inspect" CLI subcommand.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the volume to inspect.
|
||||
type: str
|
||||
required: yes
|
||||
aliases:
|
||||
- volume_name
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.21"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get infos on volume
|
||||
docker_volume_info:
|
||||
name: mydata
|
||||
register: result
|
||||
|
||||
- name: Does volume exist?
|
||||
debug:
|
||||
msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
|
||||
|
||||
- name: Print information about volume
|
||||
debug:
|
||||
var: result.volume
|
||||
when: result.exists
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the volume exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
volume:
|
||||
description:
|
||||
- Volume inspection results for the affected volume.
|
||||
- Will be C(none) if volume does not exist.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '{
|
||||
"CreatedAt": "2018-12-09T17:43:44+01:00",
|
||||
"Driver": "local",
|
||||
"Labels": null,
|
||||
"Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
|
||||
"Name": "ansible-test-bd3f6172",
|
||||
"Options": {},
|
||||
"Scope": "local"
|
||||
}'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
def get_existing_volume(client, volume_name):
    """Return the inspection data for *volume_name*, or None if it does not exist.

    Any other inspection error is reported through client.fail().
    """
    try:
        inspection = client.inspect_volume(volume_name)
    except NotFound:
        return None
    except Exception as exc:
        client.fail("Error inspecting volume: %s" % exc)
    else:
        return inspection
|
||||
|
||||
|
||||
def main():
    """Module entry point: inspect a single Docker volume."""
    client = AnsibleDockerClient(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['volume_name']),
        ),
        supports_check_mode=True,
        min_docker_version='1.8.0',
        min_docker_api_version='1.21',
    )

    try:
        volume = get_existing_volume(client, client.module.params['name'])

        client.module.exit_json(
            changed=False,
            exists=bool(volume),
            volume=volume,
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
2
tests/integration/targets/connection_docker/aliases
Normal file
2
tests/integration/targets/connection_docker/aliases
Normal file
@ -0,0 +1,2 @@
|
||||
non_local
|
||||
unsupported
|
||||
1
tests/integration/targets/connection_docker/runme.sh
Symbolic link
1
tests/integration/targets/connection_docker/runme.sh
Symbolic link
@ -0,0 +1 @@
|
||||
../connection_posix/test.sh
|
||||
@ -0,0 +1,6 @@
|
||||
[docker]
|
||||
docker-pipelining ansible_ssh_pipelining=true
|
||||
docker-no-pipelining ansible_ssh_pipelining=false
|
||||
[docker:vars]
|
||||
ansible_host=ubuntu-latest
|
||||
ansible_connection=docker
|
||||
8
tests/integration/targets/docker_config/aliases
Normal file
8
tests/integration/targets/docker_config/aliases
Normal file
@ -0,0 +1,8 @@
|
||||
shippable/posix/group3
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
skip/aix
|
||||
destructive
|
||||
skip/docker # The tests sometimes make docker daemon unstable; hence,
|
||||
# we skip all docker-based CI runs to avoid disrupting
|
||||
# the whole CI system.
|
||||
3
tests/integration/targets/docker_config/meta/main.yml
Normal file
3
tests/integration/targets/docker_config/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
6
tests/integration/targets/docker_config/tasks/main.yml
Normal file
6
tests/integration/targets/docker_config/tasks/main.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- include_tasks: test_docker_config.yml
|
||||
when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_config tests!"
|
||||
when: not(docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
@ -0,0 +1,133 @@
|
||||
---
|
||||
- block:
|
||||
- shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool"
|
||||
|
||||
- name: Make sure we're not already using Docker swarm
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
|
||||
- shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool"
|
||||
|
||||
- name: Create a Swarm cluster
|
||||
docker_swarm:
|
||||
state: present
|
||||
advertise_addr: "{{ansible_default_ipv4.address}}"
|
||||
|
||||
- name: Parameter name should be required
|
||||
docker_config:
|
||||
state: present
|
||||
ignore_errors: yes
|
||||
register: output
|
||||
|
||||
- name: assert failure when called with no name
|
||||
assert:
|
||||
that:
|
||||
- 'output.failed'
|
||||
- 'output.msg == "missing required arguments: name"'
|
||||
|
||||
- name: Test parameters
|
||||
docker_config:
|
||||
name: foo
|
||||
state: present
|
||||
ignore_errors: yes
|
||||
register: output
|
||||
|
||||
- name: assert failure when called with no data
|
||||
assert:
|
||||
that:
|
||||
- 'output.failed'
|
||||
- 'output.msg == "state is present but all of the following are missing: data"'
|
||||
|
||||
- name: Create config
|
||||
docker_config:
|
||||
name: db_password
|
||||
data: opensesame!
|
||||
state: present
|
||||
register: output
|
||||
|
||||
- name: Create variable config_id
|
||||
set_fact:
|
||||
config_id: "{{ output.config_id }}"
|
||||
|
||||
- name: Inspect config
|
||||
command: "docker config inspect {{ config_id }}"
|
||||
register: inspect
|
||||
|
||||
- debug: var=inspect
|
||||
|
||||
- name: assert config creation succeeded
|
||||
assert:
|
||||
that:
|
||||
- "'db_password' in inspect.stdout"
|
||||
- "'ansible_key' in inspect.stdout"
|
||||
|
||||
- name: Create config again
|
||||
docker_config:
|
||||
name: db_password
|
||||
data: opensesame!
|
||||
state: present
|
||||
register: output
|
||||
|
||||
- name: assert create config is idempotent
|
||||
assert:
|
||||
that:
|
||||
- not output.changed
|
||||
|
||||
- name: Create config again (base64)
|
||||
docker_config:
|
||||
name: db_password
|
||||
data: b3BlbnNlc2FtZSE=
|
||||
data_is_b64: true
|
||||
state: present
|
||||
register: output
|
||||
|
||||
- name: assert create config (base64) is idempotent
|
||||
assert:
|
||||
that:
|
||||
- not output.changed
|
||||
|
||||
- name: Update config
|
||||
docker_config:
|
||||
name: db_password
|
||||
data: newpassword!
|
||||
state: present
|
||||
register: output
|
||||
|
||||
- name: assert config was updated
|
||||
assert:
|
||||
that:
|
||||
- output.changed
|
||||
- output.config_id != config_id
|
||||
|
||||
- name: Remove config
|
||||
docker_config:
|
||||
name: db_password
|
||||
state: absent
|
||||
|
||||
- name: Check that config is removed
|
||||
command: "docker config inspect {{ config_id }}"
|
||||
register: output
|
||||
ignore_errors: yes
|
||||
|
||||
- name: assert config was removed
|
||||
assert:
|
||||
that:
|
||||
- output.failed
|
||||
|
||||
- name: Remove config
|
||||
docker_config:
|
||||
name: db_password
|
||||
state: absent
|
||||
register: output
|
||||
|
||||
- name: assert remove config is idempotent
|
||||
assert:
|
||||
that:
|
||||
- not output.changed
|
||||
|
||||
always:
|
||||
- name: Remove a Swarm cluster
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
5
tests/integration/targets/docker_container/aliases
Normal file
5
tests/integration/targets/docker_container/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group5
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
skip/aix
|
||||
destructive
|
||||
@ -0,0 +1,2 @@
|
||||
TEST3=val3
|
||||
TEST4=val4
|
||||
3
tests/integration/targets/docker_container/meta/main.yml
Normal file
3
tests/integration/targets/docker_container/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
43
tests/integration/targets/docker_container/tasks/main.yml
Normal file
43
tests/integration/targets/docker_container/tasks/main.yml
Normal file
@ -0,0 +1,43 @@
|
||||
---
|
||||
# Create random name prefix (for containers, networks, ...)
|
||||
- name: Create random container name prefix
|
||||
set_fact:
|
||||
cname_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
|
||||
cnames: []
|
||||
dnetworks: []
|
||||
|
||||
- debug:
|
||||
msg: "Using container name prefix {{ cname_prefix }}"
|
||||
|
||||
# Install netaddr
|
||||
- name: Install netaddr for ipaddr filter
|
||||
pip:
|
||||
name: netaddr
|
||||
|
||||
# Run the tests
|
||||
- block:
|
||||
- include_tasks: run-test.yml
|
||||
with_fileglob:
|
||||
- "tests/*.yml"
|
||||
|
||||
always:
|
||||
- name: "Make sure all containers are removed"
|
||||
docker_container:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
with_items: "{{ cnames }}"
|
||||
diff: no
|
||||
- name: "Make sure all networks are removed"
|
||||
docker_network:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
force: yes
|
||||
with_items: "{{ dnetworks }}"
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
diff: no
|
||||
|
||||
when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run all docker_container tests!"
|
||||
when: not(docker_py_version is version('3.5.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
@ -0,0 +1,3 @@
|
||||
---
|
||||
- name: "Loading tasks from {{ item }}"
|
||||
include_tasks: "{{ item }}"
|
||||
@ -0,0 +1,463 @@
|
||||
---
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-comparisons' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname] }}"
|
||||
|
||||
####################################################################
|
||||
## value ###########################################################
|
||||
####################################################################
|
||||
|
||||
- name: value
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
hostname: example.com
|
||||
register: value_1
|
||||
|
||||
- name: value (change, ignore)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
hostname: example.org
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
hostname: ignore
|
||||
register: value_2
|
||||
|
||||
- name: value (change, strict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
hostname: example.org
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
hostname: strict
|
||||
register: value_3
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- value_1 is changed
|
||||
- value_2 is not changed
|
||||
- value_3 is changed
|
||||
|
||||
####################################################################
|
||||
## list ############################################################
|
||||
####################################################################
|
||||
|
||||
- name: list
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
dns_servers:
|
||||
- 1.1.1.1
|
||||
- 8.8.8.8
|
||||
register: list_1
|
||||
|
||||
- name: list (change, ignore)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
dns_servers:
|
||||
- 9.9.9.9
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
dns_servers: ignore
|
||||
register: list_2
|
||||
|
||||
- name: list (change, strict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
dns_servers:
|
||||
- 9.9.9.9
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
dns_servers: strict
|
||||
register: list_3
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- list_1 is changed
|
||||
- list_2 is not changed
|
||||
- list_3 is changed
|
||||
|
||||
####################################################################
|
||||
## set #############################################################
|
||||
####################################################################
|
||||
|
||||
- name: set
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
groups:
|
||||
- "1010"
|
||||
- "1011"
|
||||
register: set_1
|
||||
|
||||
- name: set (change, ignore)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
groups:
|
||||
- "1010"
|
||||
- "1011"
|
||||
- "1012"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
groups: ignore
|
||||
register: set_2
|
||||
|
||||
- name: set (change, allow_more_present)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
groups:
|
||||
- "1010"
|
||||
- "1011"
|
||||
- "1012"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
groups: allow_more_present
|
||||
register: set_3
|
||||
|
||||
- name: set (change, allow_more_present)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
groups:
|
||||
- "1010"
|
||||
- "1012"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
groups: allow_more_present
|
||||
register: set_4
|
||||
|
||||
- name: set (change, strict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
groups:
|
||||
- "1010"
|
||||
- "1012"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
groups: strict
|
||||
register: set_5
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- set_1 is changed
|
||||
- set_2 is not changed
|
||||
- set_3 is changed
|
||||
- set_4 is not changed
|
||||
- set_5 is changed
|
||||
|
||||
####################################################################
|
||||
## set(dict) #######################################################
|
||||
####################################################################
|
||||
|
||||
- name: set(dict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
devices:
|
||||
- "/dev/random:/dev/virt-random:rwm"
|
||||
- "/dev/urandom:/dev/virt-urandom:rwm"
|
||||
register: set_dict_1
|
||||
|
||||
- name: set(dict) (change, ignore)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
devices:
|
||||
- "/dev/random:/dev/virt-random:rwm"
|
||||
- "/dev/urandom:/dev/virt-urandom:rwm"
|
||||
- "/dev/null:/dev/virt-null:rwm"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
devices: ignore
|
||||
register: set_dict_2
|
||||
|
||||
- name: set(dict) (change, allow_more_present)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
devices:
|
||||
- "/dev/random:/dev/virt-random:rwm"
|
||||
- "/dev/urandom:/dev/virt-urandom:rwm"
|
||||
- "/dev/null:/dev/virt-null:rwm"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
devices: allow_more_present
|
||||
register: set_dict_3
|
||||
|
||||
- name: set(dict) (change, allow_more_present)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
devices:
|
||||
- "/dev/random:/dev/virt-random:rwm"
|
||||
- "/dev/null:/dev/virt-null:rwm"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
devices: allow_more_present
|
||||
register: set_dict_4
|
||||
|
||||
- name: set(dict) (change, strict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
devices:
|
||||
- "/dev/random:/dev/virt-random:rwm"
|
||||
- "/dev/null:/dev/virt-null:rwm"
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
devices: strict
|
||||
register: set_dict_5
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- set_dict_1 is changed
|
||||
- set_dict_2 is not changed
|
||||
- set_dict_3 is changed
|
||||
- set_dict_4 is not changed
|
||||
- set_dict_5 is changed
|
||||
|
||||
####################################################################
|
||||
## dict ############################################################
|
||||
####################################################################
|
||||
|
||||
- name: dict
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.2: world
|
||||
register: dict_1
|
||||
|
||||
- name: dict (change, ignore)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.2: world
|
||||
ansible.test.3: ansible
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
labels: ignore
|
||||
register: dict_2
|
||||
|
||||
- name: dict (change, allow_more_present)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.2: world
|
||||
ansible.test.3: ansible
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
labels: allow_more_present
|
||||
register: dict_3
|
||||
|
||||
- name: dict (change, allow_more_present)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.3: ansible
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
labels: allow_more_present
|
||||
register: dict_4
|
||||
|
||||
- name: dict (change, strict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.3: ansible
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
labels: strict
|
||||
register: dict_5
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- dict_1 is changed
|
||||
- dict_2 is not changed
|
||||
- dict_3 is changed
|
||||
- dict_4 is not changed
|
||||
- dict_5 is changed
|
||||
|
||||
####################################################################
|
||||
## wildcard ########################################################
|
||||
####################################################################
|
||||
|
||||
- name: Pull hello-world image to make sure wildcard_2 test succeeds
|
||||
# If the image isn't there, it will pull it and return 'changed'.
|
||||
docker_image:
|
||||
name: hello-world
|
||||
pull: true
|
||||
|
||||
- name: wildcard
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
hostname: example.com
|
||||
stop_timeout: 1
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.2: world
|
||||
ansible.test.3: ansible
|
||||
register: wildcard_1
|
||||
|
||||
- name: wildcard (change, ignore)
|
||||
docker_container:
|
||||
image: hello-world
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
hostname: example.org
|
||||
stop_timeout: 2
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.4: ignore
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
'*': ignore
|
||||
register: wildcard_2
|
||||
|
||||
- name: wildcard (change, strict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
hostname: example.org
|
||||
stop_timeout: 1
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.2: world
|
||||
ansible.test.3: ansible
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
'*': strict
|
||||
register: wildcard_3
|
||||
|
||||
- name: wildcard (no change, strict)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
hostname: example.org
|
||||
stop_timeout: 1
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.2: world
|
||||
ansible.test.3: ansible
|
||||
force_kill: yes
|
||||
comparisons:
|
||||
'*': strict
|
||||
register: wildcard_4
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- wildcard_1 is changed
|
||||
- wildcard_2 is not changed
|
||||
- wildcard_3 is changed
|
||||
- wildcard_4 is not changed
|
||||
@ -0,0 +1,118 @@
|
||||
---
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-hi' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname] }}"
|
||||
|
||||
####################################################################
|
||||
## container_default_behavior: compatibility #######################
|
||||
####################################################################
|
||||
|
||||
- name: Start container (check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
container_default_behavior: compatibility
|
||||
check_mode: yes
|
||||
register: start_1
|
||||
|
||||
- name: Start container
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
container_default_behavior: compatibility
|
||||
register: start_2
|
||||
|
||||
- name: Start container (idempotent)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
container_default_behavior: compatibility
|
||||
register: start_3
|
||||
|
||||
- name: Start container (idempotent check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
state: started
|
||||
container_default_behavior: compatibility
|
||||
check_mode: yes
|
||||
register: start_4
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- start_1 is changed
|
||||
- start_2 is changed
|
||||
- start_3 is not changed
|
||||
- start_4 is not changed
|
||||
|
||||
####################################################################
|
||||
## container_default_behavior: no_defaults #########################
|
||||
####################################################################
|
||||
|
||||
- name: Start container (check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
container_default_behavior: no_defaults
|
||||
check_mode: yes
|
||||
register: start_1
|
||||
|
||||
- name: Start container
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
container_default_behavior: no_defaults
|
||||
register: start_2
|
||||
|
||||
- name: Start container (idempotent)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
container_default_behavior: no_defaults
|
||||
register: start_3
|
||||
|
||||
- name: Start container (idempotent check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
container_default_behavior: no_defaults
|
||||
check_mode: yes
|
||||
register: start_4
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- start_1 is changed
|
||||
- start_2 is changed
|
||||
- start_3 is not changed
|
||||
- start_4 is not changed
|
||||
@ -0,0 +1,146 @@
|
||||
---
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-iid' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname] }}"
|
||||
|
||||
- name: Pull images
|
||||
docker_image:
|
||||
name: "{{ image }}"
|
||||
source: pull
|
||||
loop:
|
||||
- "hello-world:latest"
|
||||
- "alpine:3.8"
|
||||
loop_control:
|
||||
loop_var: image
|
||||
|
||||
- name: Get image ID of hello-world and alpine images
|
||||
docker_image_info:
|
||||
name:
|
||||
- "hello-world:latest"
|
||||
- "alpine:3.8"
|
||||
register: image_info
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- image_info.images | length == 2
|
||||
|
||||
- name: Print image IDs
|
||||
debug:
|
||||
msg: "hello-world: {{ image_info.images[0].Id }}; alpine: {{ image_info.images[1].Id }}"
|
||||
|
||||
- name: Create container with hello-world image via ID
|
||||
docker_container:
|
||||
image: "{{ image_info.images[0].Id }}"
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: create_1
|
||||
|
||||
- name: Create container with hello-world image via ID (idempotent)
|
||||
docker_container:
|
||||
image: "{{ image_info.images[0].Id }}"
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: create_2
|
||||
|
||||
- name: Create container with alpine image via ID
|
||||
docker_container:
|
||||
image: "{{ image_info.images[1].Id }}"
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: create_3
|
||||
|
||||
- name: Create container with alpine image via ID (idempotent)
|
||||
docker_container:
|
||||
image: "{{ image_info.images[1].Id }}"
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: create_4
|
||||
|
||||
- name: Untag image
|
||||
# Image will not be deleted since the container still uses it
|
||||
docker_image:
|
||||
name: alpine:3.8
|
||||
force_absent: yes
|
||||
state: absent
|
||||
|
||||
- name: Create container with alpine image via name (check mode, will pull, same image)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
register: create_5
|
||||
check_mode: yes
|
||||
|
||||
- name: Create container with alpine image via name (will pull, same image)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
register: create_6
|
||||
|
||||
- name: Cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- create_1 is changed
|
||||
- create_2 is not changed
|
||||
- create_3 is changed
|
||||
- create_4 is not changed
|
||||
- create_5 is changed
|
||||
- create_6 is changed
|
||||
- create_6.container.Image == image_info.images[1].Id
|
||||
- create_6.container.Id == create_4.container.Id # make sure container wasn't recreated
|
||||
|
||||
- name: set Digests
|
||||
set_fact:
|
||||
digest_hello_world_2016: 0256e8a36e2070f7bf2d0b0763dbabdd67798512411de4cdcf9431a1feb60fd9
|
||||
digest_hello_world_2019: 2557e3c07ed1e38f26e389462d03ed943586f744621577a99efb77324b0fe535
|
||||
|
||||
- name: Create container with hello-world image via old digest
|
||||
docker_container:
|
||||
image: "hello-world@sha256:{{ digest_hello_world_2016 }}"
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: digest_1
|
||||
|
||||
- name: Create container with hello-world image via old digest (idempotent)
|
||||
docker_container:
|
||||
image: "hello-world@sha256:{{ digest_hello_world_2016 }}"
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: digest_2
|
||||
|
||||
- name: Update container with hello-world image via new digest
|
||||
docker_container:
|
||||
image: "hello-world@sha256:{{ digest_hello_world_2019 }}"
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: digest_3
|
||||
|
||||
- name: Cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- digest_1 is changed
|
||||
- digest_2 is not changed
|
||||
- digest_3 is changed
|
||||
@ -0,0 +1,445 @@
|
||||
---
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-mounts' }}"
|
||||
cname_h1: "{{ cname_prefix ~ '-mounts-h1' }}"
|
||||
cname_h2: "{{ cname_prefix ~ '-mounts-h2' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname, cname_h1, cname_h2] }}"
|
||||
|
||||
####################################################################
|
||||
## keep_volumes ####################################################
|
||||
####################################################################
|
||||
|
||||
# TODO: - keep_volumes
|
||||
|
||||
####################################################################
|
||||
## mounts ##########################################################
|
||||
####################################################################
|
||||
|
||||
- name: mounts
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /tmp
|
||||
target: /tmp
|
||||
type: bind
|
||||
- source: /
|
||||
target: /whatever
|
||||
type: bind
|
||||
read_only: no
|
||||
register: mounts_1
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /
|
||||
target: /whatever
|
||||
type: bind
|
||||
read_only: no
|
||||
- source: /tmp
|
||||
target: /tmp
|
||||
type: bind
|
||||
register: mounts_2
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts (less mounts)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /tmp
|
||||
target: /tmp
|
||||
type: bind
|
||||
register: mounts_3
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts (more mounts)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /tmp
|
||||
target: /tmp
|
||||
type: bind
|
||||
- source: /tmp
|
||||
target: /somewhereelse
|
||||
type: bind
|
||||
read_only: yes
|
||||
force_kill: yes
|
||||
register: mounts_4
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts (different modes)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /tmp
|
||||
target: /tmp
|
||||
type: bind
|
||||
- source: /tmp
|
||||
target: /somewhereelse
|
||||
type: bind
|
||||
read_only: no
|
||||
force_kill: yes
|
||||
register: mounts_5
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts (endpoint collision)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /home
|
||||
target: /x
|
||||
type: bind
|
||||
- source: /etc
|
||||
target: /x
|
||||
type: bind
|
||||
read_only: no
|
||||
force_kill: yes
|
||||
register: mounts_6
|
||||
ignore_errors: yes
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- mounts_1 is changed
|
||||
- mounts_2 is not changed
|
||||
- mounts_3 is not changed
|
||||
- mounts_4 is changed
|
||||
- mounts_5 is changed
|
||||
- mounts_6 is failed
|
||||
- "'The mount point \"/x\" appears twice in the mounts option' == mounts_6.msg"
|
||||
when: docker_py_version is version('2.6.0', '>=')
|
||||
- assert:
|
||||
that:
|
||||
- mounts_1 is failed
|
||||
- "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg"
|
||||
- "'Minimum version required is 2.6.0 ' in mounts_1.msg"
|
||||
when: docker_py_version is version('2.6.0', '<')
|
||||
|
||||
####################################################################
|
||||
## mounts + volumes ################################################
|
||||
####################################################################
|
||||
|
||||
- name: mounts + volumes
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /
|
||||
target: /whatever
|
||||
type: bind
|
||||
read_only: yes
|
||||
volumes:
|
||||
- /tmp:/tmp
|
||||
register: mounts_volumes_1
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts + volumes (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /
|
||||
target: /whatever
|
||||
type: bind
|
||||
read_only: yes
|
||||
volumes:
|
||||
- /tmp:/tmp
|
||||
register: mounts_volumes_2
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts + volumes (switching)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /tmp
|
||||
target: /tmp
|
||||
type: bind
|
||||
read_only: no
|
||||
volumes:
|
||||
- /:/whatever:ro
|
||||
force_kill: yes
|
||||
register: mounts_volumes_3
|
||||
ignore_errors: yes
|
||||
|
||||
- name: mounts + volumes (collision, should fail)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
mounts:
|
||||
- source: /tmp
|
||||
target: /tmp
|
||||
type: bind
|
||||
read_only: no
|
||||
volumes:
|
||||
- /tmp:/tmp
|
||||
force_kill: yes
|
||||
register: mounts_volumes_4
|
||||
ignore_errors: yes
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- mounts_volumes_1 is changed
|
||||
- mounts_volumes_2 is not changed
|
||||
- mounts_volumes_3 is changed
|
||||
- mounts_volumes_4 is failed
|
||||
- "'The mount point \"/tmp\" appears both in the volumes and mounts option' in mounts_volumes_4.msg"
|
||||
when: docker_py_version is version('2.6.0', '>=')
|
||||
- assert:
|
||||
that:
|
||||
- mounts_volumes_1 is failed
|
||||
- "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg"
|
||||
- "'Minimum version required is 2.6.0 ' in mounts_1.msg"
|
||||
when: docker_py_version is version('2.6.0', '<')
|
||||
|
||||
####################################################################
|
||||
## volume_driver ###################################################
|
||||
####################################################################
|
||||
|
||||
- name: volume_driver
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
volume_driver: local
|
||||
state: started
|
||||
register: volume_driver_1
|
||||
|
||||
- name: volume_driver (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
volume_driver: local
|
||||
state: started
|
||||
register: volume_driver_2
|
||||
|
||||
- name: volume_driver (change)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
volume_driver: /
|
||||
state: started
|
||||
force_kill: yes
|
||||
register: volume_driver_3
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- volume_driver_1 is changed
|
||||
- volume_driver_2 is not changed
|
||||
- volume_driver_3 is changed
|
||||
|
||||
####################################################################
|
||||
## volumes #########################################################
|
||||
####################################################################
|
||||
|
||||
- name: volumes
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes:
|
||||
- "/tmp:/tmp"
|
||||
- "/:/whatever:rw,z"
|
||||
- "/anon:rw"
|
||||
register: volumes_1
|
||||
|
||||
- name: volumes (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes:
|
||||
- "/:/whatever:rw,z"
|
||||
- "/tmp:/tmp"
|
||||
- "/anon:rw"
|
||||
register: volumes_2
|
||||
|
||||
- name: volumes (less volumes)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes:
|
||||
- "/tmp:/tmp"
|
||||
register: volumes_3
|
||||
|
||||
- name: volumes (more volumes)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes:
|
||||
- "/tmp:/tmp"
|
||||
- "/tmp:/somewhereelse:ro,Z"
|
||||
force_kill: yes
|
||||
register: volumes_4
|
||||
|
||||
- name: volumes (different modes)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes:
|
||||
- "/tmp:/tmp"
|
||||
- "/tmp:/somewhereelse:ro"
|
||||
force_kill: yes
|
||||
register: volumes_5
|
||||
|
||||
- name: volumes (collision)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes:
|
||||
- "/etc:/tmp"
|
||||
- "/home:/tmp:ro"
|
||||
force_kill: yes
|
||||
register: volumes_6
|
||||
ignore_errors: yes
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- volumes_1 is changed
|
||||
- volumes_1.container.Config.Volumes | length == 1
|
||||
- volumes_1.container.Config.Volumes['/anon:rw'] | length == 0
|
||||
- volumes_2 is not changed
|
||||
- volumes_3 is not changed
|
||||
- volumes_4 is changed
|
||||
- not volumes_4.container.Config.Volumes
|
||||
- volumes_5 is changed
|
||||
- volumes_6 is failed
|
||||
- "'The mount point \"/tmp\" appears twice in the volumes option' in volumes_6.msg"
|
||||
|
||||
####################################################################
|
||||
## volumes_from ####################################################
|
||||
####################################################################
|
||||
|
||||
- name: start helpers
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ container_name }}"
|
||||
state: started
|
||||
volumes:
|
||||
- "{{ '/tmp:/tmp' if container_name == cname_h1 else '/:/whatever:ro' }}"
|
||||
loop:
|
||||
- "{{ cname_h1 }}"
|
||||
- "{{ cname_h2 }}"
|
||||
loop_control:
|
||||
loop_var: container_name
|
||||
|
||||
- name: volumes_from
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes_from: "{{ cname_h1 }}"
|
||||
register: volumes_from_1
|
||||
|
||||
- name: volumes_from (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes_from: "{{ cname_h1 }}"
|
||||
register: volumes_from_2
|
||||
|
||||
- name: volumes_from (change)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
volumes_from: "{{ cname_h2 }}"
|
||||
force_kill: yes
|
||||
register: volumes_from_3
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ container_name }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
loop:
|
||||
- "{{ cname }}"
|
||||
- "{{ cname_h1 }}"
|
||||
- "{{ cname_h2 }}"
|
||||
loop_control:
|
||||
loop_var: container_name
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- volumes_from_1 is changed
|
||||
- volumes_from_2 is not changed
|
||||
- volumes_from_3 is changed
|
||||
|
||||
####################################################################
|
||||
####################################################################
|
||||
####################################################################
|
||||
@ -0,0 +1,736 @@
|
||||
---
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-network' }}"
|
||||
cname_h1: "{{ cname_prefix ~ '-network-h1' }}"
|
||||
nname_1: "{{ cname_prefix ~ '-network-1' }}"
|
||||
nname_2: "{{ cname_prefix ~ '-network-2' }}"
|
||||
nname_3: "{{ cname_prefix ~ '-network-3' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname, cname_h1] }}"
|
||||
dnetworks: "{{ dnetworks + [nname_1, nname_2, nname_3] }}"
|
||||
|
||||
- name: Create networks
|
||||
docker_network:
|
||||
name: "{{ network_name }}"
|
||||
state: present
|
||||
loop:
|
||||
- "{{ nname_1 }}"
|
||||
- "{{ nname_2 }}"
|
||||
loop_control:
|
||||
loop_var: network_name
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
|
||||
- set_fact:
|
||||
subnet_ipv4: "192.168.{{ 64 + (192 | random) }}.0/24"
|
||||
subnet_ipv6: "fdb6:feea:{{ '%0.4x:%0.4x' | format(65536 | random, 65536 | random) }}::/64"
|
||||
|
||||
- set_fact:
|
||||
# If netaddr would be installed on the controller, one could do:
|
||||
nname_3_ipv4_2: "{{ subnet_ipv4 | next_nth_usable(2) }}"
|
||||
nname_3_ipv4_3: "{{ subnet_ipv4 | next_nth_usable(3) }}"
|
||||
nname_3_ipv4_4: "{{ subnet_ipv4 | next_nth_usable(4) }}"
|
||||
nname_3_ipv6_2: "{{ subnet_ipv6 | next_nth_usable(2) }}"
|
||||
nname_3_ipv6_3: "{{ subnet_ipv6 | next_nth_usable(3) }}"
|
||||
nname_3_ipv6_4: "{{ subnet_ipv6 | next_nth_usable(4) }}"
|
||||
|
||||
- debug:
|
||||
msg: "Chose random IPv4 subnet {{ subnet_ipv4 }} and random IPv6 subnet {{ subnet_ipv6 }}"
|
||||
|
||||
- name: Create network with fixed IPv4 and IPv6 subnets
|
||||
docker_network:
|
||||
name: "{{ nname_3 }}"
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: "{{ subnet_ipv4 }}"
|
||||
- subnet: "{{ subnet_ipv6 }}"
|
||||
state: present
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
|
||||
####################################################################
|
||||
## network_mode ####################################################
|
||||
####################################################################
|
||||
|
||||
- name: network_mode
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
network_mode: host
|
||||
register: network_mode_1
|
||||
|
||||
- name: network_mode (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
network_mode: host
|
||||
register: network_mode_2
|
||||
|
||||
- name: network_mode (change)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
network_mode: none
|
||||
force_kill: yes
|
||||
register: network_mode_3
|
||||
|
||||
- name: network_mode (container mode setup)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname_h1 }}"
|
||||
state: started
|
||||
register: cname_h1_id
|
||||
|
||||
- name: network_mode (container mode)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
network_mode: "container:{{ cname_h1_id.container.Id }}"
|
||||
force_kill: yes
|
||||
register: network_mode_4
|
||||
|
||||
- name: network_mode (container mode idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
network_mode: "container:{{ cname_h1 }}"
|
||||
register: network_mode_5
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ container_name }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
loop:
|
||||
- "{{ cname }}"
|
||||
- "{{ cname_h1 }}"
|
||||
loop_control:
|
||||
loop_var: container_name
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network_mode_1 is changed
|
||||
- network_mode_1.container.HostConfig.NetworkMode == 'host'
|
||||
- network_mode_2 is not changed
|
||||
- network_mode_2.container.HostConfig.NetworkMode == 'host'
|
||||
- network_mode_3 is changed
|
||||
- network_mode_3.container.HostConfig.NetworkMode == 'none'
|
||||
- network_mode_4 is changed
|
||||
- network_mode_4.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id
|
||||
- network_mode_5 is not changed
|
||||
- network_mode_5.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id
|
||||
|
||||
####################################################################
|
||||
## networks, purge_networks for networks_cli_compatible=no #########
|
||||
####################################################################
|
||||
|
||||
- block:
|
||||
- name: networks_cli_compatible=no, networks w/o purge_networks
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_1 }}"
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: no
|
||||
register: networks_1
|
||||
|
||||
- name: networks_cli_compatible=no, networks w/o purge_networks
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_1 }}"
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: no
|
||||
register: networks_2
|
||||
|
||||
- name: networks_cli_compatible=no, networks, purge_networks
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
purge_networks: yes
|
||||
networks:
|
||||
- name: bridge
|
||||
- name: "{{ nname_1 }}"
|
||||
networks_cli_compatible: no
|
||||
force_kill: yes
|
||||
register: networks_3
|
||||
|
||||
- name: networks_cli_compatible=no, networks, purge_networks (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
purge_networks: yes
|
||||
networks:
|
||||
- name: "{{ nname_1 }}"
|
||||
- name: bridge
|
||||
networks_cli_compatible: no
|
||||
register: networks_4
|
||||
|
||||
- name: networks_cli_compatible=no, networks (less networks)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: bridge
|
||||
networks_cli_compatible: no
|
||||
register: networks_5
|
||||
|
||||
- name: networks_cli_compatible=no, networks, purge_networks (less networks)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
purge_networks: yes
|
||||
networks:
|
||||
- name: bridge
|
||||
networks_cli_compatible: no
|
||||
force_kill: yes
|
||||
register: networks_6
|
||||
|
||||
- name: networks_cli_compatible=no, networks, purge_networks (more networks)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
purge_networks: yes
|
||||
networks:
|
||||
- name: bridge
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: no
|
||||
force_kill: yes
|
||||
register: networks_7
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
# networks_1 has networks default, 'bridge', nname_1
|
||||
- networks_1 is changed
|
||||
- networks_1.container.NetworkSettings.Networks | length == 3
|
||||
- nname_1 in networks_1.container.NetworkSettings.Networks
|
||||
- nname_2 in networks_1.container.NetworkSettings.Networks
|
||||
- "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks"
|
||||
# networks_2 has networks default, 'bridge', nname_1
|
||||
- networks_2 is not changed
|
||||
- networks_2.container.NetworkSettings.Networks | length == 3
|
||||
- nname_1 in networks_2.container.NetworkSettings.Networks
|
||||
- nname_2 in networks_1.container.NetworkSettings.Networks
|
||||
- "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks"
|
||||
# networks_3 has networks 'bridge', nname_1
|
||||
- networks_3 is changed
|
||||
- networks_3.container.NetworkSettings.Networks | length == 2
|
||||
- nname_1 in networks_3.container.NetworkSettings.Networks
|
||||
- "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks"
|
||||
# networks_4 has networks 'bridge', nname_1
|
||||
- networks_4 is not changed
|
||||
- networks_4.container.NetworkSettings.Networks | length == 2
|
||||
- nname_1 in networks_4.container.NetworkSettings.Networks
|
||||
- "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks"
|
||||
# networks_5 has networks 'bridge', nname_1
|
||||
- networks_5 is not changed
|
||||
- networks_5.container.NetworkSettings.Networks | length == 2
|
||||
- nname_1 in networks_5.container.NetworkSettings.Networks
|
||||
- "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in networks_5.container.NetworkSettings.Networks"
|
||||
# networks_6 has networks 'bridge'
|
||||
- networks_6 is changed
|
||||
- networks_6.container.NetworkSettings.Networks | length == 1
|
||||
- "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks"
|
||||
# networks_7 has networks 'bridge', nname_2
|
||||
- networks_7 is changed
|
||||
- networks_7.container.NetworkSettings.Networks | length == 2
|
||||
- nname_2 in networks_7.container.NetworkSettings.Networks
|
||||
- "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks"
|
||||
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
|
||||
####################################################################
|
||||
## networks for networks_cli_compatible=yes ########################
|
||||
####################################################################
|
||||
|
||||
- block:
|
||||
- name: networks_cli_compatible=yes, networks specified
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_1 }}"
|
||||
aliases:
|
||||
- alias1
|
||||
- alias2
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_1
|
||||
|
||||
- name: networks_cli_compatible=yes, networks specified
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_1 }}"
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_2
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- name: networks_cli_compatible=yes, empty networks list specified
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks: []
|
||||
networks_cli_compatible: yes
|
||||
register: networks_3
|
||||
|
||||
- name: networks_cli_compatible=yes, empty networks list specified
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks: []
|
||||
networks_cli_compatible: yes
|
||||
register: networks_4
|
||||
|
||||
- name: networks_cli_compatible=yes, empty networks list specified, purge_networks
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks: []
|
||||
networks_cli_compatible: yes
|
||||
purge_networks: yes
|
||||
force_kill: yes
|
||||
register: networks_5
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- name: networks_cli_compatible=yes, networks not specified
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks_cli_compatible: yes
|
||||
force_kill: yes
|
||||
register: networks_6
|
||||
|
||||
- name: networks_cli_compatible=yes, networks not specified
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks_cli_compatible: yes
|
||||
register: networks_7
|
||||
|
||||
- name: networks_cli_compatible=yes, networks not specified, purge_networks
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks_cli_compatible: yes
|
||||
purge_networks: yes
|
||||
force_kill: yes
|
||||
register: networks_8
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- debug: var=networks_3
|
||||
|
||||
- assert:
|
||||
that:
|
||||
# networks_1 has networks nname_1, nname_2
|
||||
- networks_1 is changed
|
||||
- networks_1.container.NetworkSettings.Networks | length == 2
|
||||
- nname_1 in networks_1.container.NetworkSettings.Networks
|
||||
- nname_2 in networks_1.container.NetworkSettings.Networks
|
||||
# networks_2 has networks nname_1, nname_2
|
||||
- networks_2 is not changed
|
||||
- networks_2.container.NetworkSettings.Networks | length == 2
|
||||
- nname_1 in networks_2.container.NetworkSettings.Networks
|
||||
- nname_2 in networks_1.container.NetworkSettings.Networks
|
||||
# networks_3 has networks 'bridge'
|
||||
- networks_3 is changed
|
||||
- networks_3.container.NetworkSettings.Networks | length == 1
|
||||
- "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks"
|
||||
# networks_4 has networks 'bridge'
|
||||
- networks_4 is not changed
|
||||
- networks_4.container.NetworkSettings.Networks | length == 1
|
||||
- "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks"
|
||||
# networks_5 has no networks
|
||||
- networks_5 is changed
|
||||
- networks_5.container.NetworkSettings.Networks | length == 0
|
||||
# networks_6 has networks 'bridge'
|
||||
- networks_6 is changed
|
||||
- networks_6.container.NetworkSettings.Networks | length == 1
|
||||
- "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks"
|
||||
# networks_7 has networks 'bridge'
|
||||
- networks_7 is not changed
|
||||
- networks_7.container.NetworkSettings.Networks | length == 1
|
||||
- "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks"
|
||||
# networks_8 has no networks
|
||||
- networks_8 is changed
|
||||
- networks_8.container.NetworkSettings.Networks | length == 0
|
||||
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
|
||||
####################################################################
|
||||
## networks with comparisons #######################################
|
||||
####################################################################
|
||||
|
||||
- block:
|
||||
- name: create container with one network
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_1 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_1
|
||||
|
||||
- name: different networks, comparisons=ignore
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
comparisons:
|
||||
networks: ignore
|
||||
register: networks_2
|
||||
|
||||
- name: less networks, comparisons=ignore
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks: []
|
||||
networks_cli_compatible: yes
|
||||
comparisons:
|
||||
networks: ignore
|
||||
register: networks_3
|
||||
|
||||
- name: less networks, comparisons=allow_more_present
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks: []
|
||||
networks_cli_compatible: yes
|
||||
comparisons:
|
||||
networks: allow_more_present
|
||||
register: networks_4
|
||||
|
||||
- name: different networks, comparisons=allow_more_present
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
comparisons:
|
||||
networks: allow_more_present
|
||||
force_kill: yes
|
||||
register: networks_5
|
||||
|
||||
- name: different networks, comparisons=strict
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
comparisons:
|
||||
networks: strict
|
||||
force_kill: yes
|
||||
register: networks_6
|
||||
|
||||
- name: less networks, comparisons=strict
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks: []
|
||||
networks_cli_compatible: yes
|
||||
comparisons:
|
||||
networks: strict
|
||||
force_kill: yes
|
||||
register: networks_7
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
# networks_1 has networks nname_1
|
||||
- networks_1 is changed
|
||||
- networks_1.container.NetworkSettings.Networks | length == 1
|
||||
- nname_1 in networks_1.container.NetworkSettings.Networks
|
||||
# networks_2 has networks nname_1
|
||||
- networks_2 is not changed
|
||||
- networks_2.container.NetworkSettings.Networks | length == 1
|
||||
- nname_1 in networks_2.container.NetworkSettings.Networks
|
||||
# networks_3 has networks nname_1
|
||||
- networks_3 is not changed
|
||||
- networks_3.container.NetworkSettings.Networks | length == 1
|
||||
- nname_1 in networks_3.container.NetworkSettings.Networks
|
||||
# networks_4 has networks nname_1
|
||||
- networks_4 is not changed
|
||||
- networks_4.container.NetworkSettings.Networks | length == 1
|
||||
- nname_1 in networks_4.container.NetworkSettings.Networks
|
||||
# networks_5 has networks nname_1, nname_2
|
||||
- networks_5 is changed
|
||||
- networks_5.container.NetworkSettings.Networks | length == 2
|
||||
- nname_1 in networks_5.container.NetworkSettings.Networks
|
||||
- nname_2 in networks_5.container.NetworkSettings.Networks
|
||||
# networks_6 has networks nname_2
|
||||
- networks_6 is changed
|
||||
- networks_6.container.NetworkSettings.Networks | length == 1
|
||||
- nname_2 in networks_6.container.NetworkSettings.Networks
|
||||
# networks_7 has no networks
|
||||
- networks_7 is changed
|
||||
- networks_7.container.NetworkSettings.Networks | length == 0
|
||||
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
|
||||
####################################################################
|
||||
## networks with IP address ########################################
|
||||
####################################################################
|
||||
|
||||
- block:
|
||||
- name: create container (stopped) with one network and fixed IP
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
networks:
|
||||
- name: "{{ nname_3 }}"
|
||||
ipv4_address: "{{ nname_3_ipv4_2 }}"
|
||||
ipv6_address: "{{ nname_3_ipv6_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_1
|
||||
|
||||
- name: create container (stopped) with one network and fixed IP (idempotent)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
networks:
|
||||
- name: "{{ nname_3 }}"
|
||||
ipv4_address: "{{ nname_3_ipv4_2 }}"
|
||||
ipv6_address: "{{ nname_3_ipv6_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_2
|
||||
|
||||
- name: create container (stopped) with one network and fixed IP (different IPv4)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
networks:
|
||||
- name: "{{ nname_3 }}"
|
||||
ipv4_address: "{{ nname_3_ipv4_3 }}"
|
||||
ipv6_address: "{{ nname_3_ipv6_2 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_3
|
||||
|
||||
- name: create container (stopped) with one network and fixed IP (different IPv6)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
networks:
|
||||
- name: "{{ nname_3 }}"
|
||||
ipv4_address: "{{ nname_3_ipv4_3 }}"
|
||||
ipv6_address: "{{ nname_3_ipv6_3 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_4
|
||||
|
||||
- name: create container (started) with one network and fixed IP
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
register: networks_5
|
||||
|
||||
- name: create container (started) with one network and fixed IP (different IPv4)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_3 }}"
|
||||
ipv4_address: "{{ nname_3_ipv4_4 }}"
|
||||
ipv6_address: "{{ nname_3_ipv6_3 }}"
|
||||
networks_cli_compatible: yes
|
||||
force_kill: yes
|
||||
register: networks_6
|
||||
|
||||
- name: create container (started) with one network and fixed IP (different IPv6)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_3 }}"
|
||||
ipv4_address: "{{ nname_3_ipv4_4 }}"
|
||||
ipv6_address: "{{ nname_3_ipv6_4 }}"
|
||||
networks_cli_compatible: yes
|
||||
force_kill: yes
|
||||
register: networks_7
|
||||
|
||||
- name: create container (started) with one network and fixed IP (idempotent)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
networks:
|
||||
- name: "{{ nname_3 }}"
|
||||
ipv4_address: "{{ nname_3_ipv4_4 }}"
|
||||
ipv6_address: "{{ nname_3_ipv6_4 }}"
|
||||
networks_cli_compatible: yes
|
||||
register: networks_8
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- networks_1 is changed
|
||||
- networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2
|
||||
- networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_2 | ipaddr
|
||||
- networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == ""
|
||||
- networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
|
||||
- networks_2 is not changed
|
||||
- networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2
|
||||
- networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_2 | ipaddr
|
||||
- networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == ""
|
||||
- networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
|
||||
- networks_3 is changed
|
||||
- networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
|
||||
- networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_2 | ipaddr
|
||||
- networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == ""
|
||||
- networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
|
||||
- networks_4 is changed
|
||||
- networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
|
||||
- networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_3 | ipaddr
|
||||
- networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == ""
|
||||
- networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
|
||||
- networks_5 is changed
|
||||
- networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
|
||||
- networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_3 | ipaddr
|
||||
- networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3
|
||||
- networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | ipaddr == nname_3_ipv6_3 | ipaddr
|
||||
- networks_6 is changed
|
||||
- networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
|
||||
- networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_3 | ipaddr
|
||||
- networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
|
||||
- networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | ipaddr == nname_3_ipv6_3 | ipaddr
|
||||
- networks_7 is changed
|
||||
- networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
|
||||
- networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_4 | ipaddr
|
||||
- networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
|
||||
- networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | ipaddr == nname_3_ipv6_4 | ipaddr
|
||||
- networks_8 is not changed
|
||||
- networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
|
||||
- networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | ipaddr == nname_3_ipv6_4 | ipaddr
|
||||
- networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
|
||||
- networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | ipaddr == nname_3_ipv6_4 | ipaddr
|
||||
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
|
||||
####################################################################
|
||||
####################################################################
|
||||
####################################################################
|
||||
|
||||
- name: Delete networks
|
||||
docker_network:
|
||||
name: "{{ network_name }}"
|
||||
state: absent
|
||||
force: yes
|
||||
loop:
|
||||
- "{{ nname_1 }}"
|
||||
- "{{ nname_2 }}"
|
||||
- "{{ nname_3 }}"
|
||||
loop_control:
|
||||
loop_var: network_name
|
||||
when: docker_py_version is version('1.10.0', '>=')
|
||||
3745
tests/integration/targets/docker_container/tasks/tests/options.yml
Normal file
3745
tests/integration/targets/docker_container/tasks/tests/options.yml
Normal file
File diff suppressed because it is too large
Load Diff
286
tests/integration/targets/docker_container/tasks/tests/ports.yml
Normal file
286
tests/integration/targets/docker_container/tasks/tests/ports.yml
Normal file
@ -0,0 +1,286 @@
|
||||
---
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-options' }}"
|
||||
cname2: "{{ cname_prefix ~ '-options-h1' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname, cname2] }}"
|
||||
|
||||
####################################################################
|
||||
## published_ports: all ############################################
|
||||
####################################################################
|
||||
|
||||
- name: published_ports -- all
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9002"
|
||||
published_ports:
|
||||
- all
|
||||
force_kill: yes
|
||||
register: published_ports_1
|
||||
|
||||
- name: published_ports -- all (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9002"
|
||||
published_ports:
|
||||
- all
|
||||
force_kill: yes
|
||||
register: published_ports_2
|
||||
|
||||
- name: published_ports -- all (writing out 'all')
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9002"
|
||||
published_ports:
|
||||
- "9001"
|
||||
- "9002"
|
||||
force_kill: yes
|
||||
register: published_ports_3
|
||||
|
||||
- name: published_ports -- all (idempotency 2)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9002"
|
||||
published_ports:
|
||||
- "9002"
|
||||
- "9001"
|
||||
force_kill: yes
|
||||
register: published_ports_4
|
||||
|
||||
- name: published_ports -- all (switching back to 'all')
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9002"
|
||||
published_ports:
|
||||
- all
|
||||
force_kill: yes
|
||||
register: published_ports_5
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- published_ports_1 is changed
|
||||
- published_ports_2 is not changed
|
||||
- published_ports_3 is changed
|
||||
- published_ports_4 is not changed
|
||||
- published_ports_5 is changed
|
||||
|
||||
####################################################################
|
||||
## published_ports: port range #####################################
|
||||
####################################################################
|
||||
|
||||
- name: published_ports -- port range
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9010-9050"
|
||||
published_ports:
|
||||
- "9001:9001"
|
||||
- "9010-9050:9010-9050"
|
||||
force_kill: yes
|
||||
register: published_ports_1
|
||||
|
||||
- name: published_ports -- port range (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9010-9050"
|
||||
published_ports:
|
||||
- "9001:9001"
|
||||
- "9010-9050:9010-9050"
|
||||
force_kill: yes
|
||||
register: published_ports_2
|
||||
|
||||
- name: published_ports -- port range (different range)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
exposed_ports:
|
||||
- "9001"
|
||||
- "9010-9050"
|
||||
published_ports:
|
||||
- "9001:9001"
|
||||
- "9020-9060:9020-9060"
|
||||
force_kill: yes
|
||||
register: published_ports_3
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- published_ports_1 is changed
|
||||
- published_ports_2 is not changed
|
||||
- published_ports_3 is changed
|
||||
|
||||
####################################################################
|
||||
## published_ports: one-element container port range ###############
|
||||
####################################################################
|
||||
|
||||
- name: published_ports -- one-element container port range
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ item }}"
|
||||
state: started
|
||||
published_ports:
|
||||
- "9010-9050:9010"
|
||||
force_kill: yes
|
||||
loop:
|
||||
- '{{ cname }}'
|
||||
- '{{ cname2 }}'
|
||||
register: published_ports_1
|
||||
|
||||
- name: published_ports -- one-element container port range (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ item }}"
|
||||
state: started
|
||||
published_ports:
|
||||
- "9010-9050:9010"
|
||||
force_kill: yes
|
||||
loop:
|
||||
- '{{ cname }}'
|
||||
- '{{ cname2 }}'
|
||||
register: published_ports_2
|
||||
|
||||
- name: published_ports -- one-element container port range (different range)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ item }}"
|
||||
state: started
|
||||
published_ports:
|
||||
- "9010-9051:9010"
|
||||
force_kill: yes
|
||||
loop:
|
||||
- '{{ cname }}'
|
||||
- '{{ cname2 }}'
|
||||
register: published_ports_3
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
loop:
|
||||
- '{{ cname }}'
|
||||
- '{{ cname2 }}'
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- published_ports_1 is changed
|
||||
- published_ports_2 is not changed
|
||||
- published_ports_3 is changed
|
||||
|
||||
####################################################################
|
||||
## published_ports: IPv6 addresses #################################
|
||||
####################################################################
|
||||
|
||||
- name: published_ports -- IPv6
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
published_ports:
|
||||
- "[::1]:9001:9001"
|
||||
force_kill: yes
|
||||
register: published_ports_1
|
||||
|
||||
- name: published_ports -- IPv6 (idempotency)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
published_ports:
|
||||
- "[::1]:9001:9001"
|
||||
force_kill: yes
|
||||
register: published_ports_2
|
||||
|
||||
- name: published_ports -- IPv6 (different IP)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
published_ports:
|
||||
- "127.0.0.1:9001:9001"
|
||||
force_kill: yes
|
||||
register: published_ports_3
|
||||
|
||||
- name: published_ports -- IPv6 (hostname)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
published_ports:
|
||||
- "localhost:9001:9001"
|
||||
force_kill: yes
|
||||
register: published_ports_4
|
||||
ignore_errors: yes
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- published_ports_1 is changed
|
||||
- published_ports_2 is not changed
|
||||
- published_ports_3 is changed
|
||||
- published_ports_4 is failed
|
||||
@ -0,0 +1,34 @@
|
||||
---
|
||||
# Regression test for https://github.com/ansible/ansible/pull/45700
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-45700' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname] }}"
|
||||
|
||||
- name: Start container
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
|
||||
- name: Stop container with a lot of invalid options
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
force_kill: yes
|
||||
# Some options with "invalid" values, which would
|
||||
# have to be parsed. The values are "invalid" because
|
||||
# the containers and networks listed here do not exist.
|
||||
# This can happen because the networks are removed
|
||||
# before the container is stopped (see
|
||||
# https://github.com/ansible/ansible/issues/45486).
|
||||
networks:
|
||||
- name: "nonexistant-network-{{ (2**32) | random }}"
|
||||
published_ports:
|
||||
- '1:2'
|
||||
- '3'
|
||||
links:
|
||||
- "nonexistant-container-{{ (2**32) | random }}:test"
|
||||
state: absent
|
||||
@ -0,0 +1,455 @@
|
||||
---
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cname: "{{ cname_prefix ~ '-hi' }}"
|
||||
- name: Registering container name
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname] }}"
|
||||
|
||||
####################################################################
|
||||
## Creation ########################################################
|
||||
####################################################################
|
||||
|
||||
- name: Create container (check)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
check_mode: yes
|
||||
register: create_1
|
||||
|
||||
- name: Create container
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
register: create_2
|
||||
|
||||
- name: Create container (idempotent)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
register: create_3
|
||||
|
||||
- name: Create container (idempotent check)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
check_mode: yes
|
||||
register: create_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- create_1 is changed
|
||||
- create_2 is changed
|
||||
- create_3 is not changed
|
||||
- create_4 is not changed
|
||||
|
||||
####################################################################
|
||||
## Starting (after creation) #######################################
|
||||
####################################################################
|
||||
|
||||
- name: Start container (check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
check_mode: yes
|
||||
register: start_1
|
||||
|
||||
- name: Start container
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
register: start_2
|
||||
|
||||
- name: Start container (idempotent)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
register: start_3
|
||||
|
||||
- name: Start container (idempotent check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
check_mode: yes
|
||||
register: start_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- start_1 is changed
|
||||
- start_2 is changed
|
||||
- start_3 is not changed
|
||||
- start_4 is not changed
|
||||
|
||||
####################################################################
|
||||
## Present check for running container #############################
|
||||
####################################################################
|
||||
|
||||
- name: Present check for running container (check)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
check_mode: yes
|
||||
register: present_check_1
|
||||
|
||||
- name: Present check for running container
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
register: present_check_2
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- present_check_1 is not changed
|
||||
- present_check_2 is not changed
|
||||
|
||||
####################################################################
|
||||
## Starting (from scratch) #########################################
|
||||
####################################################################
|
||||
|
||||
- name: Remove container (setup for starting from scratch)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
|
||||
- name: Start container from scratch (check)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
stop_timeout: 1
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
check_mode: yes
|
||||
register: start_scratch_1
|
||||
|
||||
- name: Start container from scratch
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
stop_timeout: 1
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
register: start_scratch_2
|
||||
|
||||
- name: Start container from scratch (idempotent)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
stop_timeout: 1
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
register: start_scratch_3
|
||||
|
||||
- name: Start container from scratch (idempotent check)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
stop_timeout: 1
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
check_mode: yes
|
||||
register: start_scratch_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- start_scratch_1 is changed
|
||||
- start_scratch_2 is changed
|
||||
- start_scratch_3 is not changed
|
||||
- start_scratch_4 is not changed
|
||||
|
||||
####################################################################
|
||||
## Recreating ######################################################
|
||||
####################################################################
|
||||
|
||||
- name: Recreating container (created)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: recreate_1
|
||||
|
||||
- name: Recreating container (created, recreate, check mode)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
recreate: yes
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: recreate_2
|
||||
check_mode: yes
|
||||
|
||||
- name: Recreating container (created, recreate)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
recreate: yes
|
||||
state: present
|
||||
force_kill: yes
|
||||
register: recreate_3
|
||||
|
||||
- name: Recreating container (started)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
force_kill: yes
|
||||
register: recreate_4
|
||||
|
||||
- name: Recreating container (started, recreate, check mode)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
recreate: yes
|
||||
removal_wait_timeout: 10
|
||||
state: started
|
||||
force_kill: yes
|
||||
register: recreate_5
|
||||
check_mode: yes
|
||||
|
||||
- name: Recreating container (started, recreate)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
recreate: yes
|
||||
removal_wait_timeout: 10
|
||||
state: started
|
||||
force_kill: yes
|
||||
register: recreate_6
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- debug: var=recreate_1
|
||||
- debug: var=recreate_3
|
||||
- debug: var=recreate_4
|
||||
- debug: var=recreate_6
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- recreate_2 is changed
|
||||
- recreate_3 is changed
|
||||
- recreate_4 is changed
|
||||
- recreate_5 is changed
|
||||
- recreate_6 is changed
|
||||
- recreate_1.container.Id == recreate_2.container.Id
|
||||
- recreate_1.container.Id != recreate_3.container.Id
|
||||
- recreate_3.container.Id == recreate_4.container.Id
|
||||
- recreate_4.container.Id == recreate_5.container.Id
|
||||
- recreate_4.container.Id != recreate_6.container.Id
|
||||
|
||||
####################################################################
|
||||
## Restarting ######################################################
|
||||
####################################################################
|
||||
|
||||
- name: Restarting
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
stop_timeout: 1
|
||||
volumes:
|
||||
- /tmp/tmp
|
||||
register: restart_1
|
||||
|
||||
- name: Restarting (restart, check mode)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
restart: yes
|
||||
state: started
|
||||
stop_timeout: 1
|
||||
force_kill: yes
|
||||
register: restart_2
|
||||
check_mode: yes
|
||||
|
||||
- name: Restarting (restart)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
restart: yes
|
||||
state: started
|
||||
stop_timeout: 1
|
||||
force_kill: yes
|
||||
register: restart_3
|
||||
|
||||
- name: Restarting (verify volumes)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
stop_timeout: 1
|
||||
volumes:
|
||||
- /tmp/tmp
|
||||
register: restart_4
|
||||
|
||||
- name: cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
diff: no
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- restart_1 is changed
|
||||
- restart_2 is changed
|
||||
- restart_3 is changed
|
||||
- restart_1.container.Id == restart_3.container.Id
|
||||
- restart_4 is not changed
|
||||
|
||||
####################################################################
|
||||
## Stopping ########################################################
|
||||
####################################################################
|
||||
|
||||
- name: Stop container (check)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
stop_timeout: 1
|
||||
check_mode: yes
|
||||
register: stop_1
|
||||
|
||||
- name: Stop container
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
stop_timeout: 1
|
||||
register: stop_2
|
||||
|
||||
- name: Stop container (idempotent)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
stop_timeout: 1
|
||||
register: stop_3
|
||||
|
||||
- name: Stop container (idempotent check)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
name: "{{ cname }}"
|
||||
state: stopped
|
||||
stop_timeout: 1
|
||||
check_mode: yes
|
||||
register: stop_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- stop_1 is changed
|
||||
- stop_2 is changed
|
||||
- stop_3 is not changed
|
||||
- stop_4 is not changed
|
||||
|
||||
####################################################################
|
||||
## Removing ########################################################
|
||||
####################################################################
|
||||
|
||||
- name: Remove container (check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
check_mode: yes
|
||||
register: remove_1
|
||||
|
||||
- name: Remove container
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
register: remove_2
|
||||
|
||||
- name: Remove container (idempotent)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
register: remove_3
|
||||
|
||||
- name: Remove container (idempotent check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
check_mode: yes
|
||||
register: remove_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- remove_1 is changed
|
||||
- remove_2 is changed
|
||||
- remove_3 is not changed
|
||||
- remove_4 is not changed
|
||||
|
||||
####################################################################
|
||||
## Removing (from running) #########################################
|
||||
####################################################################
|
||||
|
||||
- name: Start container (setup for removing from running)
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
|
||||
- name: Remove container from running (check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
check_mode: yes
|
||||
register: remove_from_running_1
|
||||
|
||||
- name: Remove container from running
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
register: remove_from_running_2
|
||||
|
||||
- name: Remove container from running (idempotent)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
register: remove_from_running_3
|
||||
|
||||
- name: Remove container from running (idempotent check)
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
check_mode: yes
|
||||
register: remove_from_running_4
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- remove_from_running_1 is changed
|
||||
- remove_from_running_2 is changed
|
||||
- remove_from_running_3 is not changed
|
||||
- remove_from_running_4 is not changed
|
||||
5
tests/integration/targets/docker_container_info/aliases
Normal file
5
tests/integration/targets/docker_container_info/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group2
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
skip/aix
|
||||
destructive
|
||||
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
@ -0,0 +1,64 @@
|
||||
---
|
||||
- block:
|
||||
- name: Create random container name
|
||||
set_fact:
|
||||
cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
|
||||
|
||||
- name: Make sure container is not there
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
|
||||
- name: Inspect a non-present container
|
||||
docker_container_info:
|
||||
name: "{{ cname }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "not result.exists"
|
||||
- "'container' in result"
|
||||
- "result.container is none"
|
||||
|
||||
- name: Make sure container exists
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
state: started
|
||||
force_kill: yes
|
||||
|
||||
- name: Inspect a present container
|
||||
docker_container_info:
|
||||
name: "{{ cname }}"
|
||||
register: result
|
||||
- name: Dump docker_container_info result
|
||||
debug: var=result
|
||||
|
||||
- name: "Comparison: use 'docker inspect'"
|
||||
command: docker inspect "{{ cname }}"
|
||||
register: docker_inspect
|
||||
- set_fact:
|
||||
docker_inspect_result: "{{ docker_inspect.stdout | from_json }}"
|
||||
- name: Dump docker inspect result
|
||||
debug: var=docker_inspect_result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.exists
|
||||
- "'container' in result"
|
||||
- "result.container"
|
||||
- "result.container == docker_inspect_result[0]"
|
||||
|
||||
always:
|
||||
- name: Cleanup
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
|
||||
when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_container_info tests!"
|
||||
when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
5
tests/integration/targets/docker_host_info/aliases
Normal file
5
tests/integration/targets/docker_host_info/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group2
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
3
tests/integration/targets/docker_host_info/meta/main.yml
Normal file
3
tests/integration/targets/docker_host_info/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
@ -0,0 +1,5 @@
|
||||
- include_tasks: test_host_info.yml
|
||||
when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.21', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_host_info tests!"
|
||||
when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.21', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
@ -0,0 +1,296 @@
|
||||
---
|
||||
- name: Create random container/volume name
|
||||
set_fact:
|
||||
cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
|
||||
vname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
|
||||
|
||||
- debug:
|
||||
msg: "Using container name '{{ cname }}' and volume name '{{ vname }}'"
|
||||
|
||||
- block:
|
||||
- name: Get info on Docker host
|
||||
docker_host_info:
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
|
||||
# Container and volume are created so that all lists are non-empty:
|
||||
# * container and volume lists are non-emtpy because of the created objects;
|
||||
# * image list is non-empty because the image of the container is there;
|
||||
# * network list is always non-empty (default networks).
|
||||
- name: Create container
|
||||
docker_container:
|
||||
image: alpine:3.8
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
name: "{{ cname }}"
|
||||
state: started
|
||||
register: container_output
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- container_output is changed
|
||||
|
||||
- name: Create a volume
|
||||
docker_volume:
|
||||
name: "{{ vname }}"
|
||||
register: volume_output
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- volume_output is changed
|
||||
|
||||
- name: Get info on Docker host and list containers
|
||||
docker_host_info:
|
||||
containers: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list containers
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
- 'output.containers[0].Image is string'
|
||||
- 'output.containers[0].ImageID is not defined'
|
||||
|
||||
- name: Get info on Docker host and list containers with verbose output
|
||||
docker_host_info:
|
||||
containers: yes
|
||||
verbose_output: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list containers with verbose output
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
- 'output.containers[0].Image is string'
|
||||
- 'output.containers[0].ImageID is string'
|
||||
|
||||
- name: Get info on Docker host and list images
|
||||
docker_host_info:
|
||||
images: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list images
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images[0].Id is string'
|
||||
- 'output.images[0].ParentId is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
|
||||
- name: Get info on Docker host and list images with verbose output
|
||||
docker_host_info:
|
||||
images: yes
|
||||
verbose_output: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list images with verbose output
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images[0].Id is string'
|
||||
- 'output.images[0].ParentId is string'
|
||||
- 'output.disk_usage is not defined'
|
||||
|
||||
- name: Get info on Docker host and list networks
|
||||
docker_host_info:
|
||||
networks: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list networks
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks[0].Id is string'
|
||||
- 'output.networks[0].Created is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
|
||||
- name: Get info on Docker host and list networks with verbose output
|
||||
docker_host_info:
|
||||
networks: yes
|
||||
verbose_output: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list networks with verbose output
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks[0].Id is string'
|
||||
- 'output.networks[0].Created is string'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
|
||||
- name: Get info on Docker host and list volumes
|
||||
docker_host_info:
|
||||
volumes: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list volumes
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes[0].Name is string'
|
||||
- 'output.volumes[0].Mountpoint is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
|
||||
- name: Get info on Docker host and list volumes with verbose output
|
||||
docker_host_info:
|
||||
volumes: yes
|
||||
verbose_output: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and list volumes with verbose output
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes[0].Name is string'
|
||||
- 'output.volumes[0].Mountpoint is string'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage is not defined'
|
||||
|
||||
- name: Get info on Docker host and get disk usage
|
||||
docker_host_info:
|
||||
disk_usage: yes
|
||||
register: output
|
||||
ignore_errors: yes
|
||||
|
||||
- name: assert reading docker host facts when docker is running and get disk usage
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage.LayersSize is number'
|
||||
- 'output.disk_usage.BuilderSize is not defined'
|
||||
when: docker_py_version is version('2.2.0', '>=')
|
||||
- assert:
|
||||
that:
|
||||
- output is failed
|
||||
- "('version is ' ~ docker_py_version ~ ' ') in output.msg"
|
||||
- "'Minimum version required is 2.2.0 ' in output.msg"
|
||||
when: docker_py_version is version('2.2.0', '<')
|
||||
|
||||
- name: Get info on Docker host and get disk usage with verbose output
|
||||
docker_host_info:
|
||||
disk_usage: yes
|
||||
verbose_output: yes
|
||||
register: output
|
||||
ignore_errors: yes
|
||||
|
||||
- name: assert reading docker host facts when docker is running and get disk usage with verbose output
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers is not defined'
|
||||
- 'output.networks is not defined'
|
||||
- 'output.volumes is not defined'
|
||||
- 'output.images is not defined'
|
||||
- 'output.disk_usage.LayersSize is number'
|
||||
- 'output.disk_usage.BuilderSize is number'
|
||||
when: docker_py_version is version('2.2.0', '>=')
|
||||
- assert:
|
||||
that:
|
||||
- output is failed
|
||||
- "('version is ' ~ docker_py_version ~ ' ') in output.msg"
|
||||
- "'Minimum version required is 2.2.0 ' in output.msg"
|
||||
when: docker_py_version is version('2.2.0', '<')
|
||||
|
||||
- name: Get info on Docker host, disk usage and get all lists together
|
||||
docker_host_info:
|
||||
volumes: yes
|
||||
containers: yes
|
||||
networks: yes
|
||||
images: yes
|
||||
disk_usage: "{{ docker_py_version is version('2.2.0', '>=') }}"
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running, disk usage and get lists together
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers[0].Image is string'
|
||||
- 'output.containers[0].ImageID is not defined'
|
||||
- 'output.networks[0].Id is string'
|
||||
- 'output.networks[0].Created is not defined'
|
||||
- 'output.volumes[0].Name is string'
|
||||
- 'output.volumes[0].Mountpoint is not defined'
|
||||
- 'output.images[0].Id is string'
|
||||
- 'output.images[0].ParentId is not defined'
|
||||
- assert:
|
||||
that:
|
||||
- 'output.disk_usage.LayersSize is number'
|
||||
- 'output.disk_usage.BuilderSize is not defined'
|
||||
when: docker_py_version is version('2.2.0', '>=')
|
||||
|
||||
- name: Get info on Docker host, disk usage and get all lists together with verbose output
|
||||
docker_host_info:
|
||||
volumes: yes
|
||||
containers: yes
|
||||
networks: yes
|
||||
images: yes
|
||||
disk_usage: "{{ docker_py_version is version('2.2.0', '>=') }}"
|
||||
verbose_output: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading docker host facts when docker is running and get disk usage with verbose output
|
||||
assert:
|
||||
that:
|
||||
- 'output.host_info.Name is string'
|
||||
- 'output.containers[0].Image is string'
|
||||
- 'output.containers[0].ImageID is string'
|
||||
- 'output.networks[0].Id is string'
|
||||
- 'output.networks[0].Created is string'
|
||||
- 'output.volumes[0].Name is string'
|
||||
- 'output.volumes[0].Mountpoint is string'
|
||||
- 'output.images[0].Id is string'
|
||||
- 'output.images[0].ParentId is string'
|
||||
- assert:
|
||||
that:
|
||||
- 'output.disk_usage.LayersSize is number'
|
||||
- 'output.disk_usage.BuilderSize is number'
|
||||
when: docker_py_version is version('2.2.0', '>=')
|
||||
|
||||
always:
|
||||
- name: Delete container
|
||||
docker_container:
|
||||
name: "{{ cname }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
|
||||
- name: Delete volume
|
||||
docker_volume:
|
||||
name: "{{ vname }}"
|
||||
state: absent
|
||||
5
tests/integration/targets/docker_image/aliases
Normal file
5
tests/integration/targets/docker_image/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group5
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
3
tests/integration/targets/docker_image/files/Dockerfile
Normal file
3
tests/integration/targets/docker_image/files/Dockerfile
Normal file
@ -0,0 +1,3 @@
|
||||
FROM busybox
|
||||
ENV foo /bar
|
||||
WORKDIR ${foo}
|
||||
@ -0,0 +1,3 @@
|
||||
FROM busybox
|
||||
# This should fail building if docker cannot resolve some-custom-host
|
||||
RUN ping -c1 some-custom-host
|
||||
@ -0,0 +1,5 @@
|
||||
FROM alpine:3.7
|
||||
ENV INSTALL_PATH /newdata
|
||||
RUN mkdir -p $INSTALL_PATH
|
||||
|
||||
WORKDIR $INSTALL_PATH
|
||||
@ -0,0 +1,7 @@
|
||||
FROM busybox AS first
|
||||
ENV dir /first
|
||||
WORKDIR ${dir}
|
||||
|
||||
FROM busybox AS second
|
||||
ENV dir /second
|
||||
WORKDIR ${dir}
|
||||
3
tests/integration/targets/docker_image/meta/main.yml
Normal file
3
tests/integration/targets/docker_image/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker_registry
|
||||
3
tests/integration/targets/docker_image/tasks/main.yml
Normal file
3
tests/integration/targets/docker_image/tasks/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
|
||||
include_tasks:
|
||||
file: test.yml
|
||||
@ -0,0 +1,3 @@
|
||||
---
|
||||
- name: "Loading tasks from {{ item }}"
|
||||
include_tasks: "{{ item }}"
|
||||
34
tests/integration/targets/docker_image/tasks/test.yml
Normal file
34
tests/integration/targets/docker_image/tasks/test.yml
Normal file
@ -0,0 +1,34 @@
|
||||
---
|
||||
- name: Create random name prefix
|
||||
set_fact:
|
||||
name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
|
||||
- name: Create image and container list
|
||||
set_fact:
|
||||
inames: []
|
||||
cnames: []
|
||||
|
||||
- debug:
|
||||
msg: "Using name prefix {{ name_prefix }}"
|
||||
|
||||
- block:
|
||||
- include_tasks: run-test.yml
|
||||
with_fileglob:
|
||||
- "tests/*.yml"
|
||||
|
||||
always:
|
||||
- name: "Make sure all images are removed"
|
||||
docker_image:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
with_items: "{{ inames }}"
|
||||
- name: "Make sure all containers are removed"
|
||||
docker_container:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
with_items: "{{ cnames }}"
|
||||
|
||||
when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_image tests!"
|
||||
when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
78
tests/integration/targets/docker_image/tasks/tests/basic.yml
Normal file
78
tests/integration/targets/docker_image/tasks/tests/basic.yml
Normal file
@ -0,0 +1,78 @@
|
||||
---
|
||||
####################################################################
|
||||
## basic ###########################################################
|
||||
####################################################################
|
||||
|
||||
- name: Make sure image is not there
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
register: absent_1
|
||||
|
||||
- name: Make sure image is not there (idempotency)
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
state: absent
|
||||
register: absent_2
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- absent_2 is not changed
|
||||
|
||||
- name: Make sure image is there
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
state: present
|
||||
source: pull
|
||||
register: present_1
|
||||
|
||||
- name: Make sure image is there (idempotent)
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
state: present
|
||||
source: pull
|
||||
register: present_2
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- present_1 is changed
|
||||
- present_2 is not changed
|
||||
|
||||
- name: Make sure tag is not there
|
||||
docker_image:
|
||||
name: "hello-world:alias"
|
||||
state: absent
|
||||
|
||||
- name: Tag image with alias
|
||||
docker_image:
|
||||
source: local
|
||||
name: "hello-world:latest"
|
||||
repository: "hello-world:alias"
|
||||
register: tag_1
|
||||
|
||||
- name: Tag image with alias (idempotent)
|
||||
docker_image:
|
||||
source: local
|
||||
name: "hello-world:latest"
|
||||
repository: "hello-world:alias"
|
||||
register: tag_2
|
||||
|
||||
- name: Tag image with alias (force, still idempotent)
|
||||
docker_image:
|
||||
source: local
|
||||
name: "hello-world:latest"
|
||||
repository: "hello-world:alias"
|
||||
force_tag: yes
|
||||
register: tag_3
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- tag_1 is changed
|
||||
- tag_2 is not changed
|
||||
- tag_3 is not changed
|
||||
|
||||
- name: Cleanup alias tag
|
||||
docker_image:
|
||||
name: "hello-world:alias"
|
||||
state: absent
|
||||
@ -0,0 +1,152 @@
|
||||
---
|
||||
- name: Registering image name
|
||||
set_fact:
|
||||
iname: "{{ name_prefix ~ '-options' }}"
|
||||
|
||||
- name: Determining pushed image names
|
||||
set_fact:
|
||||
hello_world_image_base: "{{ registry_address }}/test/hello-world"
|
||||
test_image_base: "{{ registry_address }}/test/{{ iname }}"
|
||||
|
||||
- name: Registering image name
|
||||
set_fact:
|
||||
inames: "{{ inames + [iname, test_image_base ~ ':latest', hello_world_image_base ~ ':latest'] }}"
|
||||
|
||||
####################################################################
|
||||
## interact with test registry #####################################
|
||||
####################################################################
|
||||
|
||||
- name: Make sure image is not there
|
||||
docker_image:
|
||||
name: "{{ hello_world_image_base }}:latest"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- name: Make sure we have hello-world:latest
|
||||
docker_image:
|
||||
name: hello-world:latest
|
||||
source: pull
|
||||
|
||||
- name: Push image to test registry
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
repository: "{{ hello_world_image_base }}"
|
||||
push: yes
|
||||
source: local
|
||||
register: push_1
|
||||
|
||||
- name: Push image to test registry (idempotent)
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
repository: "{{ hello_world_image_base }}"
|
||||
push: yes
|
||||
source: local
|
||||
register: push_2
|
||||
|
||||
- name: Push image to test registry (force, still idempotent)
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
repository: "{{ hello_world_image_base }}"
|
||||
push: yes
|
||||
source: local
|
||||
force_tag: yes
|
||||
register: push_3
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- push_1 is changed
|
||||
- push_2 is not changed
|
||||
- push_3 is not changed
|
||||
|
||||
- name: Get facts of local image
|
||||
docker_image_info:
|
||||
name: "{{ hello_world_image_base }}:latest"
|
||||
register: facts_1
|
||||
|
||||
- name: Make sure image is not there
|
||||
docker_image:
|
||||
name: "{{ hello_world_image_base }}:latest"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- name: Get facts of local image (absent)
|
||||
docker_image_info:
|
||||
name: "{{ hello_world_image_base }}:latest"
|
||||
register: facts_2
|
||||
|
||||
- name: Pull image from test registry
|
||||
docker_image:
|
||||
name: "{{ hello_world_image_base }}:latest"
|
||||
state: present
|
||||
source: pull
|
||||
register: pull_1
|
||||
|
||||
- name: Pull image from test registry (idempotency)
|
||||
docker_image:
|
||||
name: "{{ hello_world_image_base }}:latest"
|
||||
state: present
|
||||
source: pull
|
||||
register: pull_2
|
||||
|
||||
- name: Get facts of local image (present)
|
||||
docker_image_info:
|
||||
name: "{{ hello_world_image_base }}:latest"
|
||||
register: facts_3
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- pull_1 is changed
|
||||
- pull_2 is not changed
|
||||
- facts_1.images | length == 1
|
||||
- facts_2.images | length == 0
|
||||
- facts_3.images | length == 1
|
||||
|
||||
####################################################################
|
||||
## repository ######################################################
|
||||
####################################################################
|
||||
|
||||
- name: Make sure image is not there
|
||||
docker_image:
|
||||
name: "{{ test_image_base }}:latest"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- name: repository
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
pull: no
|
||||
repository: "{{ test_image_base }}"
|
||||
source: build
|
||||
register: repository_1
|
||||
|
||||
- name: repository (idempotent)
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
pull: no
|
||||
repository: "{{ test_image_base }}"
|
||||
source: build
|
||||
register: repository_2
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- repository_1 is changed
|
||||
- repository_2 is not changed
|
||||
|
||||
- name: Get facts of image
|
||||
docker_image_info:
|
||||
name: "{{ test_image_base }}:latest"
|
||||
register: facts_1
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ test_image_base }}:latest"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- facts_1.images | length == 1
|
||||
@ -0,0 +1,48 @@
|
||||
---
|
||||
- name: Registering image name
|
||||
set_fact:
|
||||
iname: "{{ name_prefix ~ '-old-options' }}"
|
||||
|
||||
- name: Registering image name
|
||||
set_fact:
|
||||
inames: "{{ inames + [iname]}}"
|
||||
|
||||
####################################################################
|
||||
## build ###########################################################
|
||||
####################################################################
|
||||
|
||||
- name: build with old-style options
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
path: "{{ role_path }}/files"
|
||||
dockerfile: Dockerfile
|
||||
http_timeout: 60
|
||||
nocache: yes
|
||||
pull: no
|
||||
rm: no
|
||||
buildargs:
|
||||
TEST1: val1
|
||||
TEST2: val2
|
||||
TEST3: "True"
|
||||
container_limits:
|
||||
memory: 5000000
|
||||
memswap: 7000000
|
||||
source: build
|
||||
register: build
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- '"Please specify build.container_limits instead of container_limits. The container_limits option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
- '"Please specify build.dockerfile instead of dockerfile. The dockerfile option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
- '"Please specify build.http_timeout instead of http_timeout. The http_timeout option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
- '"Please specify build.nocache instead of nocache. The nocache option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
- '"Please specify build.path instead of path. The path option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
- '"Please specify build.pull instead of pull. The pull option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
- '"Please specify build.rm instead of rm. The rm option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
- '"Please specify build.args instead of buildargs. The buildargs option has been renamed and will be removed in Ansible 2.12." in build.warnings'
|
||||
300
tests/integration/targets/docker_image/tasks/tests/options.yml
Normal file
300
tests/integration/targets/docker_image/tasks/tests/options.yml
Normal file
@ -0,0 +1,300 @@
|
||||
---
|
||||
- name: Registering image name
|
||||
set_fact:
|
||||
iname: "{{ name_prefix ~ '-options' }}"
|
||||
iname_1: "{{ name_prefix ~ '-options-1' }}"
|
||||
|
||||
- name: Registering image name
|
||||
set_fact:
|
||||
inames: "{{ inames + [iname, iname_1] }}"
|
||||
|
||||
####################################################################
|
||||
## build.args ######################################################
|
||||
####################################################################
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- name: buildargs
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
args:
|
||||
TEST1: val1
|
||||
TEST2: val2
|
||||
TEST3: "True"
|
||||
pull: no
|
||||
source: build
|
||||
register: buildargs_1
|
||||
|
||||
- name: buildargs (idempotency)
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
args:
|
||||
TEST1: val1
|
||||
TEST2: val2
|
||||
TEST3: "True"
|
||||
pull: no
|
||||
source: build
|
||||
register: buildargs_2
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- buildargs_1 is changed
|
||||
- buildargs_2 is not changed
|
||||
when: docker_py_version is version('1.6.0', '>=')
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- buildargs_1 is failed
|
||||
- buildargs_2 is failed
|
||||
when: docker_py_version is version('1.6.0', '<')
|
||||
|
||||
####################################################################
|
||||
## container_limits ################################################
|
||||
####################################################################
|
||||
|
||||
- name: container_limits (Failed due to min memory limit)
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
container_limits:
|
||||
memory: 4000
|
||||
pull: no
|
||||
source: build
|
||||
ignore_errors: yes
|
||||
register: container_limits_1
|
||||
|
||||
- name: container_limits
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
container_limits:
|
||||
memory: 5000000
|
||||
memswap: 7000000
|
||||
pull: no
|
||||
source: build
|
||||
register: container_limits_2
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
# It *sometimes* happens that the first task does not fail.
|
||||
# For now, we work around this by
|
||||
# a) requiring that if it fails, the message must
|
||||
# contain 'Minimum memory limit allowed is 4MB', and
|
||||
# b) requiring that either the first task, or the second
|
||||
# task is changed, but not both.
|
||||
- "not container_limits_1 is failed or ('Minimum memory limit allowed is 4MB') in container_limits_1.msg"
|
||||
- "container_limits_1 is changed or container_limits_2 is changed and not (container_limits_1 is changed and container_limits_2 is changed)"
|
||||
|
||||
####################################################################
|
||||
## dockerfile ######################################################
|
||||
####################################################################
|
||||
|
||||
- name: dockerfile
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
dockerfile: "MyDockerfile"
|
||||
pull: no
|
||||
source: build
|
||||
register: dockerfile_1
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- dockerfile_1 is changed
|
||||
- dockerfile_1['image']['Config']['WorkingDir'] == '/newdata'
|
||||
|
||||
####################################################################
|
||||
## force ###########################################################
|
||||
####################################################################
|
||||
|
||||
- name: Build an image
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
pull: no
|
||||
source: build
|
||||
|
||||
- name: force (changed)
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
dockerfile: "MyDockerfile"
|
||||
pull: no
|
||||
source: build
|
||||
force_source: yes
|
||||
register: force_1
|
||||
|
||||
- name: force (unchanged)
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
dockerfile: "MyDockerfile"
|
||||
pull: no
|
||||
source: build
|
||||
force_source: yes
|
||||
register: force_2
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- force_1 is changed
|
||||
- force_2 is not changed
|
||||
|
||||
####################################################################
|
||||
## load path #######################################################
|
||||
####################################################################
|
||||
|
||||
- name: Archive image
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
archive_path: "{{ output_dir }}/image.tar"
|
||||
source: pull
|
||||
register: archive_image
|
||||
|
||||
- name: remove image
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- name: load image (changed)
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
load_path: "{{ output_dir }}/image.tar"
|
||||
source: load
|
||||
register: load_image
|
||||
|
||||
- name: load image (idempotency)
|
||||
docker_image:
|
||||
name: "hello-world:latest"
|
||||
load_path: "{{ output_dir }}/image.tar"
|
||||
source: load
|
||||
register: load_image_1
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- load_image is changed
|
||||
- load_image_1 is not changed
|
||||
- archive_image['image']['Id'] == load_image['image']['Id']
|
||||
|
||||
####################################################################
|
||||
## path ############################################################
|
||||
####################################################################
|
||||
|
||||
- name: Build image
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
pull: no
|
||||
source: build
|
||||
register: path_1
|
||||
|
||||
- name: Build image (idempotency)
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
pull: no
|
||||
source: build
|
||||
register: path_2
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- path_1 is changed
|
||||
- path_2 is not changed
|
||||
|
||||
####################################################################
|
||||
## target ##########################################################
|
||||
####################################################################
|
||||
|
||||
- name: Build multi-stage image
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
dockerfile: "StagedDockerfile"
|
||||
target: first
|
||||
pull: no
|
||||
source: build
|
||||
register: dockerfile_2
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- dockerfile_2 is changed
|
||||
- dockerfile_2.image.Config.WorkingDir == '/first'
|
||||
|
||||
####################################################################
|
||||
## build.etc_hosts #################################################
|
||||
####################################################################
|
||||
|
||||
- name: Build image with custom etc_hosts
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
build:
|
||||
path: "{{ role_path }}/files"
|
||||
dockerfile: "EtcHostsDockerfile"
|
||||
pull: no
|
||||
etc_hosts:
|
||||
some-custom-host: "127.0.0.1"
|
||||
source: build
|
||||
register: path_1
|
||||
|
||||
- name: cleanup
|
||||
docker_image:
|
||||
name: "{{ iname }}"
|
||||
state: absent
|
||||
force_absent: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- path_1 is changed
|
||||
5
tests/integration/targets/docker_image_info/aliases
Normal file
5
tests/integration/targets/docker_image_info/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group2
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
54
tests/integration/targets/docker_image_info/tasks/main.yml
Normal file
54
tests/integration/targets/docker_image_info/tasks/main.yml
Normal file
@ -0,0 +1,54 @@
|
||||
---
|
||||
- block:
|
||||
- name: Make sure image is not there
|
||||
docker_image:
|
||||
name: alpine:3.7
|
||||
state: absent
|
||||
|
||||
- name: Inspect a non-available image
|
||||
docker_image_info:
|
||||
name: alpine:3.7
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.images|length == 0"
|
||||
|
||||
- name: Make sure images are there
|
||||
docker_image:
|
||||
name: "{{ item }}"
|
||||
source: pull
|
||||
state: present
|
||||
loop:
|
||||
- "hello-world:latest"
|
||||
- "alpine:3.8"
|
||||
|
||||
- name: Inspect an available image
|
||||
docker_image_info:
|
||||
name: hello-world:latest
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.images|length == 1"
|
||||
- "'hello-world:latest' in result.images[0].RepoTags"
|
||||
|
||||
- name: Inspect multiple images
|
||||
docker_image_info:
|
||||
name:
|
||||
- "hello-world:latest"
|
||||
- "alpine:3.8"
|
||||
register: result
|
||||
|
||||
- debug: var=result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "result.images|length == 2"
|
||||
- "'hello-world:latest' in result.images[0].RepoTags"
|
||||
- "'alpine:3.8' in result.images[1].RepoTags"
|
||||
|
||||
when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_image_info tests!"
|
||||
when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
5
tests/integration/targets/docker_login/aliases
Normal file
5
tests/integration/targets/docker_login/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group3
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
3
tests/integration/targets/docker_login/meta/main.yml
Normal file
3
tests/integration/targets/docker_login/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker_registry
|
||||
3
tests/integration/targets/docker_login/tasks/main.yml
Normal file
3
tests/integration/targets/docker_login/tasks/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
|
||||
include_tasks:
|
||||
file: test.yml
|
||||
@ -0,0 +1,3 @@
|
||||
---
|
||||
- name: "Loading tasks from {{ item }}"
|
||||
include_tasks: "{{ item }}"
|
||||
8
tests/integration/targets/docker_login/tasks/test.yml
Normal file
8
tests/integration/targets/docker_login/tasks/test.yml
Normal file
@ -0,0 +1,8 @@
|
||||
- block:
|
||||
- include_tasks: run-test.yml
|
||||
with_fileglob:
|
||||
- "tests/*.yml"
|
||||
when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_image tests!"
|
||||
when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
@ -0,0 +1,136 @@
|
||||
---
|
||||
- name: Log in with wrong password (check mode)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: "1234"
|
||||
state: present
|
||||
register: login_failed_check
|
||||
ignore_errors: yes
|
||||
check_mode: yes
|
||||
|
||||
- name: Log in with wrong password
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: "1234"
|
||||
state: present
|
||||
register: login_failed
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Make sure that login failed
|
||||
assert:
|
||||
that:
|
||||
- login_failed_check is failed
|
||||
- "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg"
|
||||
- login_failed is failed
|
||||
- "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg"
|
||||
|
||||
- name: Log in (check mode)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: hunter2
|
||||
state: present
|
||||
register: login_1
|
||||
check_mode: yes
|
||||
|
||||
- name: Log in
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: hunter2
|
||||
state: present
|
||||
register: login_2
|
||||
|
||||
- name: Get permissions of ~/.docker/config.json
|
||||
stat:
|
||||
path: ~/.docker/config.json
|
||||
register: login_2_stat
|
||||
|
||||
- name: Log in (idempotent)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: hunter2
|
||||
state: present
|
||||
register: login_3
|
||||
|
||||
- name: Log in (idempotent, check mode)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: hunter2
|
||||
state: present
|
||||
register: login_4
|
||||
check_mode: yes
|
||||
|
||||
- name: Make sure that login worked
|
||||
assert:
|
||||
that:
|
||||
- login_1 is changed
|
||||
- login_2 is changed
|
||||
- login_3 is not changed
|
||||
- login_4 is not changed
|
||||
- login_2_stat.stat.mode == '0600'
|
||||
|
||||
- name: Log in again with wrong password (check mode)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: "1234"
|
||||
state: present
|
||||
register: login_failed_check
|
||||
ignore_errors: yes
|
||||
check_mode: yes
|
||||
|
||||
- name: Log in again with wrong password
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
username: testuser
|
||||
password: "1234"
|
||||
state: present
|
||||
register: login_failed
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Make sure that login failed again
|
||||
assert:
|
||||
that:
|
||||
- login_failed_check is failed
|
||||
- "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg"
|
||||
- login_failed is failed
|
||||
- "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg"
|
||||
|
||||
- name: Log out (check mode)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
state: absent
|
||||
register: logout_1
|
||||
check_mode: yes
|
||||
|
||||
- name: Log out
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
state: absent
|
||||
register: logout_2
|
||||
|
||||
- name: Log out (idempotent)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
state: absent
|
||||
register: logout_3
|
||||
|
||||
- name: Log out (idempotent, check mode)
|
||||
docker_login:
|
||||
registry_url: "{{ registry_frontend_address }}"
|
||||
state: absent
|
||||
register: logout_4
|
||||
check_mode: yes
|
||||
|
||||
- name: Make sure that login worked
|
||||
assert:
|
||||
that:
|
||||
- logout_1 is changed
|
||||
- logout_2 is changed
|
||||
- logout_3 is not changed
|
||||
- logout_4 is not changed
|
||||
5
tests/integration/targets/docker_network/aliases
Normal file
5
tests/integration/targets/docker_network/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group5
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
3
tests/integration/targets/docker_network/meta/main.yml
Normal file
3
tests/integration/targets/docker_network/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
33
tests/integration/targets/docker_network/tasks/main.yml
Normal file
33
tests/integration/targets/docker_network/tasks/main.yml
Normal file
@ -0,0 +1,33 @@
|
||||
---
|
||||
- name: Create random name prefix
|
||||
set_fact:
|
||||
name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
|
||||
cnames: []
|
||||
dnetworks: []
|
||||
|
||||
- debug:
|
||||
msg: "Using name prefix {{ name_prefix }}"
|
||||
|
||||
- block:
|
||||
- include_tasks: run-test.yml
|
||||
with_fileglob:
|
||||
- "tests/*.yml"
|
||||
|
||||
always:
|
||||
- name: "Make sure all containers are removed"
|
||||
docker_container:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
loop: "{{ cnames }}"
|
||||
- name: "Make sure all networks are removed"
|
||||
docker_network:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
force: yes
|
||||
loop: "{{ dnetworks }}"
|
||||
|
||||
when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=') # FIXME: find out API version!
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_network tests!"
|
||||
when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
@ -0,0 +1,3 @@
|
||||
---
|
||||
- name: "Loading tasks from {{ item }}"
|
||||
include_tasks: "{{ item }}"
|
||||
134
tests/integration/targets/docker_network/tasks/tests/basic.yml
Normal file
134
tests/integration/targets/docker_network/tasks/tests/basic.yml
Normal file
@ -0,0 +1,134 @@
|
||||
---
|
||||
- name: Registering container and network names
|
||||
set_fact:
|
||||
cname_1: "{{ name_prefix ~ '-container-1' }}"
|
||||
cname_2: "{{ name_prefix ~ '-container-2' }}"
|
||||
cname_3: "{{ name_prefix ~ '-container-3' }}"
|
||||
nname_1: "{{ name_prefix ~ '-network-1' }}"
|
||||
nname_2: "{{ name_prefix ~ '-network-2' }}"
|
||||
- name: Registering container and network names
|
||||
set_fact:
|
||||
cnames: "{{ cnames + [cname_1, cname_2, cname_3] }}"
|
||||
dnetworks: "{{ dnetworks + [nname_1, nname_2] }}"
|
||||
|
||||
- name: Create containers
|
||||
docker_container:
|
||||
name: "{{ container_name }}"
|
||||
image: alpine:3.8
|
||||
command: /bin/sleep 10m
|
||||
state: started
|
||||
loop:
|
||||
- "{{ cname_1 }}"
|
||||
- "{{ cname_2 }}"
|
||||
- "{{ cname_3 }}"
|
||||
loop_control:
|
||||
loop_var: container_name
|
||||
|
||||
####################################################################
|
||||
|
||||
- name: Create network
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
register: networks_1
|
||||
|
||||
- name: Connect network to containers 1
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_1 }}"
|
||||
register: networks_2
|
||||
|
||||
- name: Connect network to containers 1 (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_1 }}"
|
||||
register: networks_2_idem
|
||||
|
||||
- name: Connect network to containers 1 and 2
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_1 }}"
|
||||
- "{{ cname_2 }}"
|
||||
register: networks_3
|
||||
|
||||
- name: Connect network to containers 1 and 2 (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_1 }}"
|
||||
- "{{ cname_2 }}"
|
||||
register: networks_3_idem
|
||||
|
||||
- name: Connect network to container 3
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_3 }}"
|
||||
appends: yes
|
||||
register: networks_4
|
||||
|
||||
- name: Connect network to container 3 (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_3 }}"
|
||||
appends: yes
|
||||
register: networks_4_idem
|
||||
|
||||
- name: Disconnect network from container 1
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_2 }}"
|
||||
- "{{ cname_3 }}"
|
||||
register: networks_5
|
||||
|
||||
- name: Disconnect network from container 1 (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
connected:
|
||||
- "{{ cname_2 }}"
|
||||
- "{{ cname_3 }}"
|
||||
register: networks_5_idem
|
||||
|
||||
- name: Cleanup
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: absent
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- networks_1 is changed
|
||||
- networks_2 is changed
|
||||
- networks_2_idem is not changed
|
||||
- networks_3 is changed
|
||||
- networks_3_idem is not changed
|
||||
- networks_4 is changed
|
||||
- networks_4_idem is not changed
|
||||
- networks_5 is changed
|
||||
- networks_5_idem is not changed
|
||||
|
||||
####################################################################
|
||||
|
||||
- name: Delete containers
|
||||
docker_container:
|
||||
name: "{{ container_name }}"
|
||||
state: absent
|
||||
force_kill: yes
|
||||
loop:
|
||||
- "{{ cname_1 }}"
|
||||
- "{{ cname_2 }}"
|
||||
- "{{ cname_3 }}"
|
||||
loop_control:
|
||||
loop_var: container_name
|
||||
398
tests/integration/targets/docker_network/tasks/tests/ipam.yml
Normal file
398
tests/integration/targets/docker_network/tasks/tests/ipam.yml
Normal file
@ -0,0 +1,398 @@
|
||||
---
|
||||
- name: Registering network names
|
||||
set_fact:
|
||||
nname_ipam_0: "{{ name_prefix ~ '-network-ipam-0' }}"
|
||||
nname_ipam_1: "{{ name_prefix ~ '-network-ipam-1' }}"
|
||||
nname_ipam_2: "{{ name_prefix ~ '-network-ipam-2' }}"
|
||||
nname_ipam_3: "{{ name_prefix ~ '-network-ipam-3' }}"
|
||||
|
||||
- name: Registering network names
|
||||
set_fact:
|
||||
dnetworks: "{{ dnetworks + [nname_ipam_0, nname_ipam_1, nname_ipam_2, nname_ipam_3] }}"
|
||||
|
||||
|
||||
#################### Deprecated ipam_config ####################
|
||||
|
||||
- name: Create network with ipam_config and deprecated ipam_options (conflicting)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_0 }}"
|
||||
ipam_options:
|
||||
subnet: 172.3.29.0/24
|
||||
ipam_config:
|
||||
- subnet: 172.3.29.0/24
|
||||
register: network
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is failed
|
||||
- "network.msg == 'parameters are mutually exclusive: ipam_config|ipam_options'"
|
||||
|
||||
- name: Create network with deprecated custom IPAM options
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_0 }}"
|
||||
ipam_options:
|
||||
subnet: 172.3.29.0/24
|
||||
gateway: 172.3.29.2
|
||||
iprange: 172.3.29.0/26
|
||||
aux_addresses:
|
||||
host1: 172.3.29.3
|
||||
host2: 172.3.29.4
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
|
||||
- name: Create network with deprecated custom IPAM options (idempotence)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_0 }}"
|
||||
ipam_options:
|
||||
subnet: 172.3.29.0/24
|
||||
gateway: 172.3.29.2
|
||||
iprange: 172.3.29.0/26
|
||||
aux_addresses:
|
||||
host1: 172.3.29.3
|
||||
host2: 172.3.29.4
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Change of network created with deprecated custom IPAM options
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_0 }}"
|
||||
ipam_options:
|
||||
subnet: 172.3.28.0/24
|
||||
gateway: 172.3.28.2
|
||||
iprange: 172.3.28.0/26
|
||||
aux_addresses:
|
||||
host1: 172.3.28.3
|
||||
register: network
|
||||
diff: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
- network.diff.differences | length == 4
|
||||
- '"ipam_config[0].subnet" in network.diff.differences'
|
||||
- '"ipam_config[0].gateway" in network.diff.differences'
|
||||
- '"ipam_config[0].iprange" in network.diff.differences'
|
||||
- '"ipam_config[0].aux_addresses" in network.diff.differences'
|
||||
|
||||
- name: Remove gateway and iprange of network with deprecated custom IPAM options
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_0 }}"
|
||||
ipam_options:
|
||||
subnet: 172.3.28.0/24
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Cleanup network with deprecated custom IPAM options
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_0 }}"
|
||||
state: absent
|
||||
|
||||
|
||||
#################### IPv4 IPAM config ####################
|
||||
- name: Create network with custom IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_1 }}"
|
||||
ipam_config:
|
||||
- subnet: 172.3.27.0/24
|
||||
gateway: 172.3.27.2
|
||||
iprange: 172.3.27.0/26
|
||||
aux_addresses:
|
||||
host1: 172.3.27.3
|
||||
host2: 172.3.27.4
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
|
||||
- name: Create network with custom IPAM config (idempotence)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_1 }}"
|
||||
ipam_config:
|
||||
- subnet: 172.3.27.0/24
|
||||
gateway: 172.3.27.2
|
||||
iprange: 172.3.27.0/26
|
||||
aux_addresses:
|
||||
host1: 172.3.27.3
|
||||
host2: 172.3.27.4
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Change of network created with custom IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_1 }}"
|
||||
ipam_config:
|
||||
- subnet: 172.3.28.0/24
|
||||
gateway: 172.3.28.2
|
||||
iprange: 172.3.28.0/26
|
||||
aux_addresses:
|
||||
host1: 172.3.28.3
|
||||
register: network
|
||||
diff: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
- network.diff.differences | length == 4
|
||||
- '"ipam_config[0].subnet" in network.diff.differences'
|
||||
- '"ipam_config[0].gateway" in network.diff.differences'
|
||||
- '"ipam_config[0].iprange" in network.diff.differences'
|
||||
- '"ipam_config[0].aux_addresses" in network.diff.differences'
|
||||
|
||||
- name: Remove gateway and iprange of network with custom IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_1 }}"
|
||||
ipam_config:
|
||||
- subnet: 172.3.28.0/24
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Cleanup network with custom IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_1 }}"
|
||||
state: absent
|
||||
|
||||
|
||||
#################### IPv6 IPAM config ####################
|
||||
|
||||
- name: Create network with IPv6 IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_2 }}"
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: fdd1:ac8c:0557:7ce0::/64
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
|
||||
- name: Create network with IPv6 IPAM config (idempotence)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_2 }}"
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: fdd1:ac8c:0557:7ce0::/64
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Change subnet of network with IPv6 IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_2 }}"
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: fdd1:ac8c:0557:7ce1::/64
|
||||
register: network
|
||||
diff: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
- network.diff.differences | length == 1
|
||||
- network.diff.differences[0] == "ipam_config[0].subnet"
|
||||
|
||||
- name: Change subnet of network with IPv6 IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_2 }}"
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: "fdd1:ac8c:0557:7ce1::"
|
||||
register: network
|
||||
ignore_errors: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is failed
|
||||
- "network.msg == '\"fdd1:ac8c:0557:7ce1::\" is not a valid CIDR'"
|
||||
|
||||
- name: Cleanup network with IPv6 IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_2 }}"
|
||||
state: absent
|
||||
|
||||
|
||||
#################### IPv4 and IPv6 network ####################
|
||||
|
||||
- name: Create network with IPv6 and custom IPv4 IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: 172.4.27.0/24
|
||||
- subnet: fdd1:ac8c:0557:7ce2::/64
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
|
||||
- name: Change subnet order of network with IPv6 and custom IPv4 IPAM config (idempotence)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: fdd1:ac8c:0557:7ce2::/64
|
||||
- subnet: 172.4.27.0/24
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Remove IPv6 from network with custom IPv4 and IPv6 IPAM config (change)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
enable_ipv6: no
|
||||
ipam_config:
|
||||
- subnet: 172.4.27.0/24
|
||||
register: network
|
||||
diff: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
- network.diff.differences | length == 1
|
||||
- network.diff.differences[0] == "enable_ipv6"
|
||||
|
||||
- name: Cleanup network with IPv6 and custom IPv4 IPAM config
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
state: absent
|
||||
|
||||
|
||||
#################### multiple IPv4 networks ####################
|
||||
|
||||
- block:
|
||||
- name: Create network with two IPv4 IPAM configs
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
driver: "macvlan"
|
||||
driver_options:
|
||||
parent: "{{ ansible_default_ipv4.alias }}"
|
||||
ipam_config:
|
||||
- subnet: 172.4.27.0/24
|
||||
- subnet: 172.4.28.0/24
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
|
||||
- name: Create network with two IPv4 IPAM configs (idempotence)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
driver: "macvlan"
|
||||
driver_options:
|
||||
parent: "{{ ansible_default_ipv4.alias }}"
|
||||
ipam_config:
|
||||
- subnet: 172.4.28.0/24
|
||||
- subnet: 172.4.27.0/24
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Create network with two IPv4 IPAM configs (change)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
driver: "macvlan"
|
||||
driver_options:
|
||||
parent: "{{ ansible_default_ipv4.alias }}"
|
||||
ipam_config:
|
||||
- subnet: 172.4.27.0/24
|
||||
- subnet: 172.4.29.0/24
|
||||
register: network
|
||||
diff: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is changed
|
||||
- network.diff.differences | length == 1
|
||||
|
||||
- name: Create network with one IPv4 IPAM config (no change)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
driver: "macvlan"
|
||||
driver_options:
|
||||
parent: "{{ ansible_default_ipv4.alias }}"
|
||||
ipam_config:
|
||||
- subnet: 172.4.29.0/24
|
||||
register: network
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network is not changed
|
||||
|
||||
- name: Cleanup network
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
state: absent
|
||||
|
||||
when: ansible_facts.virtualization_type != 'docker'
|
||||
|
||||
|
||||
#################### IPAM driver options ####################
|
||||
|
||||
- name: Create network with IPAM driver options
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
ipam_driver: default
|
||||
ipam_driver_options:
|
||||
a: b
|
||||
register: network_1
|
||||
ignore_errors: yes
|
||||
- name: Create network with IPAM driver options (idempotence)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
ipam_driver: default
|
||||
ipam_driver_options:
|
||||
a: b
|
||||
diff: yes
|
||||
register: network_2
|
||||
ignore_errors: yes
|
||||
- name: Create network with IPAM driver options (change)
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
ipam_driver: default
|
||||
ipam_driver_options:
|
||||
a: c
|
||||
diff: yes
|
||||
register: network_3
|
||||
ignore_errors: yes
|
||||
- name: Cleanup network
|
||||
docker_network:
|
||||
name: "{{ nname_ipam_3 }}"
|
||||
state: absent
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- network_1 is changed
|
||||
- network_2 is not changed
|
||||
- network_3 is changed
|
||||
when: docker_py_version is version('2.0.0', '>=')
|
||||
- assert:
|
||||
that:
|
||||
- network_1 is failed
|
||||
- "('version is ' ~ docker_py_version ~ ' ') in network_1.msg"
|
||||
- "'Minimum version required is 2.0.0 ' in network_1.msg"
|
||||
when: docker_py_version is version('2.0.0', '<')
|
||||
240
tests/integration/targets/docker_network/tasks/tests/options.yml
Normal file
240
tests/integration/targets/docker_network/tasks/tests/options.yml
Normal file
@ -0,0 +1,240 @@
|
||||
---
|
||||
- name: Registering network name
|
||||
set_fact:
|
||||
nname_1: "{{ name_prefix ~ '-network-1' }}"
|
||||
- name: Registering network name
|
||||
set_fact:
|
||||
dnetworks: "{{ dnetworks + [nname_1] }}"
|
||||
|
||||
####################################################################
|
||||
## internal ########################################################
|
||||
####################################################################
|
||||
|
||||
- name: internal
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
internal: yes
|
||||
register: internal_1
|
||||
|
||||
- name: internal (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
internal: yes
|
||||
register: internal_2
|
||||
|
||||
- name: internal (change)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
internal: no
|
||||
register: internal_3
|
||||
|
||||
- name: cleanup
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- internal_1 is changed
|
||||
- internal_2 is not changed
|
||||
- internal_3 is changed
|
||||
|
||||
####################################################################
|
||||
## driver_options ##################################################
|
||||
####################################################################
|
||||
|
||||
- name: driver_options
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver_options:
|
||||
com.docker.network.bridge.enable_icc: 'false'
|
||||
register: driver_options_1
|
||||
|
||||
- name: driver_options (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver_options:
|
||||
com.docker.network.bridge.enable_icc: 'false'
|
||||
register: driver_options_2
|
||||
|
||||
- name: driver_options (idempotency with string translation)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver_options:
|
||||
com.docker.network.bridge.enable_icc: False
|
||||
register: driver_options_3
|
||||
|
||||
- name: driver_options (change)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver_options:
|
||||
com.docker.network.bridge.enable_icc: 'true'
|
||||
register: driver_options_4
|
||||
|
||||
- name: driver_options (idempotency with string translation)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver_options:
|
||||
com.docker.network.bridge.enable_icc: True
|
||||
register: driver_options_5
|
||||
|
||||
- name: cleanup
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- driver_options_1 is changed
|
||||
- driver_options_2 is not changed
|
||||
- driver_options_3 is not changed
|
||||
- driver_options_4 is changed
|
||||
- driver_options_5 is not changed
|
||||
|
||||
####################################################################
|
||||
## scope ###########################################################
|
||||
####################################################################
|
||||
|
||||
- block:
|
||||
- name: scope
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver: bridge
|
||||
scope: local
|
||||
register: scope_1
|
||||
|
||||
- name: scope (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver: bridge
|
||||
scope: local
|
||||
register: scope_2
|
||||
|
||||
- name: swarm
|
||||
docker_swarm:
|
||||
state: present
|
||||
advertise_addr: "{{ansible_default_ipv4.address}}"
|
||||
|
||||
# Driver change alongside scope is intentional - bridge doesn't appear to support anything but local, and overlay can't downgrade to local. Additionally, overlay reports as swarm for swarm OR global, so no change is reported in that case.
|
||||
# Test output indicates that the scope is altered, at least, so manual inspection will be required to verify this going forward, unless we come up with a test driver that supports multiple scopes.
|
||||
- name: scope (change)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver: overlay
|
||||
scope: swarm
|
||||
register: scope_3
|
||||
|
||||
- name: cleanup network
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- scope_1 is changed
|
||||
- scope_2 is not changed
|
||||
- scope_3 is changed
|
||||
|
||||
always:
|
||||
- name: cleanup swarm
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
# Requirements for docker_swarm
|
||||
when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.25', '>=')
|
||||
|
||||
####################################################################
|
||||
## attachable ######################################################
|
||||
####################################################################
|
||||
|
||||
- name: attachable
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
attachable: true
|
||||
register: attachable_1
|
||||
ignore_errors: yes
|
||||
|
||||
- name: attachable (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
attachable: true
|
||||
register: attachable_2
|
||||
ignore_errors: yes
|
||||
|
||||
- name: attachable (change)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
attachable: false
|
||||
register: attachable_3
|
||||
ignore_errors: yes
|
||||
|
||||
- name: cleanup
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- attachable_1 is changed
|
||||
- attachable_2 is not changed
|
||||
- attachable_3 is changed
|
||||
when: docker_py_version is version('2.0.0', '>=')
|
||||
- assert:
|
||||
that:
|
||||
- attachable_1 is failed
|
||||
- "('version is ' ~ docker_py_version ~ ' ') in attachable_1.msg"
|
||||
- "'Minimum version required is 2.0.0 ' in attachable_1.msg"
|
||||
when: docker_py_version is version('2.0.0', '<')
|
||||
|
||||
####################################################################
|
||||
## labels ##########################################################
|
||||
####################################################################
|
||||
|
||||
- name: labels
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.2: world
|
||||
register: labels_1
|
||||
|
||||
- name: labels (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
labels:
|
||||
ansible.test.2: world
|
||||
ansible.test.1: hello
|
||||
register: labels_2
|
||||
|
||||
- name: labels (less labels)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
register: labels_3
|
||||
|
||||
- name: labels (more labels)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
labels:
|
||||
ansible.test.1: hello
|
||||
ansible.test.3: ansible
|
||||
register: labels_4
|
||||
|
||||
- name: cleanup
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- labels_1 is changed
|
||||
- labels_2 is not changed
|
||||
- labels_3 is not changed
|
||||
- labels_4 is changed
|
||||
@ -0,0 +1,61 @@
|
||||
---
|
||||
- name: Registering network name
|
||||
set_fact:
|
||||
nname_1: "{{ name_prefix ~ '-network-1' }}"
|
||||
- name: Registering network name
|
||||
set_fact:
|
||||
dnetworks: "{{ dnetworks + [nname_1] }}"
|
||||
|
||||
####################################################################
|
||||
## overlay #########################################################
|
||||
####################################################################
|
||||
|
||||
- block:
|
||||
# Overlay networks require swarm initialization before they'll work
|
||||
- name: swarm
|
||||
docker_swarm:
|
||||
state: present
|
||||
advertise_addr: "{{ansible_default_ipv4.address}}"
|
||||
|
||||
- name: overlay
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver: overlay
|
||||
driver_options:
|
||||
com.docker.network.driver.overlay.vxlanid_list: "257"
|
||||
register: overlay_1
|
||||
|
||||
- name: overlay (idempotency)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver: overlay
|
||||
driver_options:
|
||||
com.docker.network.driver.overlay.vxlanid_list: "257"
|
||||
register: overlay_2
|
||||
|
||||
- name: overlay (change)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
driver: bridge
|
||||
register: overlay_3
|
||||
|
||||
- name: cleanup network
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- overlay_1 is changed
|
||||
- overlay_2 is not changed
|
||||
- overlay_3 is changed
|
||||
|
||||
always:
|
||||
- name: cleanup swarm
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
# Requirements for docker_swarm
|
||||
when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.25', '>=')
|
||||
@ -0,0 +1,37 @@
|
||||
---
|
||||
- name: Registering container and network names
|
||||
set_fact:
|
||||
nname_1: "{{ name_prefix ~ '-network-foo' }}"
|
||||
nname_2: "{{ name_prefix ~ '-network-foobar' }}"
|
||||
- name: Registering container and network names
|
||||
set_fact:
|
||||
dnetworks: "{{ dnetworks + [nname_1, nname_2] }}"
|
||||
|
||||
####################################################################
|
||||
|
||||
- name: Create network (superstring)
|
||||
docker_network:
|
||||
name: "{{ nname_2 }}"
|
||||
state: present
|
||||
register: networks_1
|
||||
|
||||
- name: Create network (substring)
|
||||
docker_network:
|
||||
name: "{{ nname_1 }}"
|
||||
state: present
|
||||
register: networks_2
|
||||
|
||||
- name: Cleanup
|
||||
docker_network:
|
||||
name: "{{ network_name }}"
|
||||
state: absent
|
||||
loop:
|
||||
- "{{ nname_1 }}"
|
||||
- "{{ nname_2 }}"
|
||||
loop_control:
|
||||
loop_var: network_name
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- networks_1 is changed
|
||||
- networks_2 is changed
|
||||
5
tests/integration/targets/docker_network_info/aliases
Normal file
5
tests/integration/targets/docker_network_info/aliases
Normal file
@ -0,0 +1,5 @@
|
||||
shippable/posix/group2
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
60
tests/integration/targets/docker_network_info/tasks/main.yml
Normal file
60
tests/integration/targets/docker_network_info/tasks/main.yml
Normal file
@ -0,0 +1,60 @@
|
||||
---
|
||||
- block:
|
||||
- name: Create random network name
|
||||
set_fact:
|
||||
nname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
|
||||
|
||||
- name: Make sure network is not there
|
||||
docker_network:
|
||||
name: "{{ nname }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- name: Inspect a non-present network
|
||||
docker_network_info:
|
||||
name: "{{ nname }}"
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "not result.exists"
|
||||
- "'network' in result"
|
||||
- "result.network is none"
|
||||
|
||||
- name: Make sure network exists
|
||||
docker_network:
|
||||
name: "{{ nname }}"
|
||||
state: present
|
||||
|
||||
- name: Inspect a present network
|
||||
docker_network_info:
|
||||
name: "{{ nname }}"
|
||||
register: result
|
||||
- name: Dump docker_network_info result
|
||||
debug: var=result
|
||||
|
||||
- name: "Comparison: use 'docker network inspect'"
|
||||
command: docker network inspect "{{ nname }}"
|
||||
register: docker_inspect
|
||||
- set_fact:
|
||||
docker_inspect_result: "{{ docker_inspect.stdout | from_json }}"
|
||||
- name: Dump docker inspect result
|
||||
debug: var=docker_inspect_result
|
||||
|
||||
- name: Cleanup
|
||||
docker_network:
|
||||
name: "{{ nname }}"
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.exists
|
||||
- "'network' in result"
|
||||
- "result.network"
|
||||
- "result.network == docker_inspect_result[0]"
|
||||
|
||||
when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.21', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_network_info tests!"
|
||||
when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.21', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
11
tests/integration/targets/docker_node/aliases
Normal file
11
tests/integration/targets/docker_node/aliases
Normal file
@ -0,0 +1,11 @@
|
||||
shippable/posix/group2
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
skip/docker # The tests sometimes make docker daemon unstable; hence,
|
||||
# we skip all docker-based CI runs to avoid disrupting
|
||||
# the whole CI system. On VMs, we restart docker daemon
|
||||
# after finishing the tests to minimize potential effects
|
||||
# on other tests.
|
||||
needs/root
|
||||
3
tests/integration/targets/docker_node/meta/main.yml
Normal file
3
tests/integration/targets/docker_node/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
31
tests/integration/targets/docker_node/tasks/main.yml
Normal file
31
tests/integration/targets/docker_node/tasks/main.yml
Normal file
@ -0,0 +1,31 @@
|
||||
---
|
||||
# Run the tests
|
||||
- block:
|
||||
- include_tasks: test_node.yml
|
||||
|
||||
always:
|
||||
- name: Cleanup (trying)
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
diff: no
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Restart docker daemon
|
||||
service:
|
||||
name: docker
|
||||
state: restarted
|
||||
become: yes
|
||||
- name: Wait for docker daemon to be fully restarted
|
||||
command: docker ps
|
||||
|
||||
- name: Cleanup
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
diff: no
|
||||
|
||||
when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_node tests!"
|
||||
when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
840
tests/integration/targets/docker_node/tasks/test_node.yml
Normal file
840
tests/integration/targets/docker_node/tasks/test_node.yml
Normal file
@ -0,0 +1,840 @@
|
||||
---
|
||||
- block:
|
||||
- name: Make sure we're not already using Docker swarm
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
|
||||
- name: Try to get docker_node_info when docker is not running in swarm mode
|
||||
docker_node_info:
|
||||
ignore_errors: yes
|
||||
register: output
|
||||
|
||||
- name: assert failure when called when swarm is not in use or not run on manager node
|
||||
assert:
|
||||
that:
|
||||
- 'output is failed'
|
||||
- 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
|
||||
|
||||
- name: Create a Swarm cluster
|
||||
docker_swarm:
|
||||
state: present
|
||||
advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
|
||||
register: output
|
||||
|
||||
- name: assert changed when create a new swarm cluster
|
||||
assert:
|
||||
that:
|
||||
- 'output is changed'
|
||||
- 'output.actions[0] | regex_search("New Swarm cluster created: ")'
|
||||
- 'output.swarm_facts.JoinTokens.Manager'
|
||||
- 'output.swarm_facts.JoinTokens.Worker'
|
||||
|
||||
- name: Try to get docker_node_info when docker is running in swarm mode and as manager
|
||||
docker_node_info:
|
||||
register: output
|
||||
|
||||
- name: assert reading docker swarm node facts
|
||||
assert:
|
||||
that:
|
||||
- 'output.nodes | length > 0'
|
||||
- 'output.nodes[0].ID is string'
|
||||
|
||||
- name: Register node ID
|
||||
set_fact:
|
||||
nodeid: "{{ output.nodes[0].ID }}"
|
||||
|
||||
####################################################################
|
||||
## Set node as swarm manager #######################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to set node as manager (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
role: manager
|
||||
check_mode: yes
|
||||
register: set_as_manager_1
|
||||
|
||||
- name: Try to set node as manager
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
role: manager
|
||||
register: set_as_manager_2
|
||||
|
||||
- name: Try to set node as manager (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
role: manager
|
||||
register: set_as_manager_3
|
||||
|
||||
- name: Try to set node as manager (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
role: manager
|
||||
check_mode: yes
|
||||
register: set_as_manager_4
|
||||
|
||||
- name: assert that node role has not changed
|
||||
assert:
|
||||
that:
|
||||
- 'set_as_manager_1 is not changed'
|
||||
- 'set_as_manager_2 is not changed'
|
||||
- 'set_as_manager_3 is not changed'
|
||||
- 'set_as_manager_4 is not changed'
|
||||
- 'set_as_manager_1.node.Spec.Role == "manager"'
|
||||
- 'set_as_manager_2.node.Spec.Role == "manager"'
|
||||
- 'set_as_manager_3.node.Spec.Role == "manager"'
|
||||
- 'set_as_manager_4.node.Spec.Role == "manager"'
|
||||
|
||||
####################################################################
|
||||
## Set node as swarm worker ########################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to set node as worker (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
role: worker
|
||||
check_mode: yes
|
||||
register: set_as_worker_1
|
||||
|
||||
- name: Try to set node as worker
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
role: worker
|
||||
ignore_errors: yes
|
||||
register: set_as_worker_2
|
||||
|
||||
- name: assert that node cannot change role to worker
|
||||
assert:
|
||||
that:
|
||||
- 'set_as_worker_1 is changed'
|
||||
- 'set_as_worker_2 is failed'
|
||||
- 'set_as_worker_2.msg | regex_search("attempting to demote the last manager of the swarm")'
|
||||
|
||||
####################################################################
|
||||
## Set node as paused ##############################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to set node availability as paused (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: pause
|
||||
check_mode: yes
|
||||
register: set_as_paused_1
|
||||
|
||||
- name: Try to set node availability as paused
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: pause
|
||||
register: set_as_paused_2
|
||||
|
||||
- name: Try to set node availability as paused (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: pause
|
||||
register: set_as_paused_3
|
||||
|
||||
- name: Try to set node availability as paused (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: pause
|
||||
check_mode: yes
|
||||
register: set_as_paused_4
|
||||
|
||||
- name: assert node changed availability to paused
|
||||
assert:
|
||||
that:
|
||||
- 'set_as_paused_1 is changed'
|
||||
- 'set_as_paused_2 is changed'
|
||||
- 'set_as_paused_3 is not changed'
|
||||
- 'set_as_paused_4 is not changed'
|
||||
- 'set_as_paused_2.node.Spec.Availability == "pause"'
|
||||
|
||||
####################################################################
|
||||
## Set node as drained #############################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to set node availability as drained (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: drain
|
||||
check_mode: yes
|
||||
register: output_drain_1
|
||||
|
||||
- name: Try to set node availability as drained
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: drain
|
||||
register: output_drain_2
|
||||
|
||||
- name: Try to set node availability as drained (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: drain
|
||||
register: output_drain_3
|
||||
|
||||
- name: Try to set node availability as drained (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: drain
|
||||
check_mode: yes
|
||||
register: output_drain_4
|
||||
|
||||
- name: assert node changed availability to drained
|
||||
assert:
|
||||
that:
|
||||
- 'output_drain_1 is changed'
|
||||
- 'output_drain_2 is changed'
|
||||
- 'output_drain_3 is not changed'
|
||||
- 'output_drain_4 is not changed'
|
||||
- 'output_drain_2.node.Spec.Availability == "drain"'
|
||||
|
||||
|
||||
####################################################################
|
||||
## Set node as active ##############################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to set node availability as active (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: active
|
||||
check_mode: yes
|
||||
register: output_active_1
|
||||
|
||||
- name: Try to set node availability as active
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: active
|
||||
register: output_active_2
|
||||
|
||||
- name: Try to set node availability as active (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: active
|
||||
register: output_active_3
|
||||
|
||||
- name: Try to set node availability as active (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
availability: active
|
||||
check_mode: yes
|
||||
register: output_active_4
|
||||
|
||||
- name: assert node changed availability to active
|
||||
assert:
|
||||
that:
|
||||
- 'output_active_1 is changed'
|
||||
- 'output_active_2 is changed'
|
||||
- 'output_active_3 is not changed'
|
||||
- 'output_active_4 is not changed'
|
||||
- 'output_active_2.node.Spec.Availability == "active"'
|
||||
|
||||
####################################################################
|
||||
## Add single label ###############################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to add single label to swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1
|
||||
check_mode: yes
|
||||
register: output_add_single_label_1
|
||||
|
||||
- name: Try to add single label to swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1
|
||||
register: output_add_single_label_2
|
||||
|
||||
- name: Try to add single label to swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1
|
||||
register: output_add_single_label_3
|
||||
|
||||
- name: Try to add single label to swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1
|
||||
check_mode: yes
|
||||
register: output_add_single_label_4
|
||||
|
||||
- name: assert adding single label to swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_add_single_label_1 is changed'
|
||||
- 'output_add_single_label_2 is changed'
|
||||
- 'output_add_single_label_3 is not changed'
|
||||
- 'output_add_single_label_4 is not changed'
|
||||
- 'output_add_single_label_2.node.Spec.Labels | length == 1'
|
||||
- 'output_add_single_label_2.node.Spec.Labels.label1 == "value1"'
|
||||
|
||||
####################################################################
|
||||
## Add multiple labels #############################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to add five labels to swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2
|
||||
label3: value3
|
||||
label4: value4
|
||||
label5: value5
|
||||
label6: value6
|
||||
check_mode: yes
|
||||
register: output_add_multiple_labels_1
|
||||
|
||||
- name: Try to add five labels to swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2
|
||||
label3: value3
|
||||
label4: value4
|
||||
label5: value5
|
||||
label6: value6
|
||||
register: output_add_multiple_labels_2
|
||||
|
||||
- name: Try to add five labels to swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2
|
||||
label3: value3
|
||||
label4: value4
|
||||
label5: value5
|
||||
label6: value6
|
||||
register: output_add_multiple_labels_3
|
||||
|
||||
- name: Try to add five labels to swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2
|
||||
label3: value3
|
||||
label4: value4
|
||||
label5: value5
|
||||
label6: value6
|
||||
check_mode: yes
|
||||
register: output_add_multiple_labels_4
|
||||
|
||||
- name: assert adding multiple labels to swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_add_multiple_labels_1 is changed'
|
||||
- 'output_add_multiple_labels_2 is changed'
|
||||
- 'output_add_multiple_labels_3 is not changed'
|
||||
- 'output_add_multiple_labels_4 is not changed'
|
||||
- 'output_add_multiple_labels_2.node.Spec.Labels | length == 6'
|
||||
- 'output_add_multiple_labels_2.node.Spec.Labels.label1 == "value1"'
|
||||
- 'output_add_multiple_labels_2.node.Spec.Labels.label6 == "value6"'
|
||||
|
||||
####################################################################
|
||||
## Update label value ##############################################
|
||||
####################################################################
|
||||
|
||||
- name: Update value of existing label (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1111
|
||||
check_mode: yes
|
||||
register: output_update_label_1
|
||||
|
||||
- name: Update value of existing label
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1111
|
||||
register: output_update_label_2
|
||||
|
||||
- name: Update value of existing label (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1111
|
||||
register: output_update_label_3
|
||||
|
||||
- name: Update value of existing label (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label1: value1111
|
||||
check_mode: yes
|
||||
register: output_update_label_4
|
||||
|
||||
- name: assert updating single label assigned to swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_update_label_1 is changed'
|
||||
- 'output_update_label_2 is changed'
|
||||
- 'output_update_label_3 is not changed'
|
||||
- 'output_update_label_4 is not changed'
|
||||
- 'output_update_label_2.node.Spec.Labels | length == 6'
|
||||
- 'output_update_label_2.node.Spec.Labels.label1 == "value1111"'
|
||||
- 'output_update_label_2.node.Spec.Labels.label5 == "value5"'
|
||||
|
||||
####################################################################
|
||||
## Update multiple labels values ###################################
|
||||
####################################################################
|
||||
|
||||
- name: Update value of multiple existing label (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2222
|
||||
label3: value3333
|
||||
check_mode: yes
|
||||
register: output_update_labels_1
|
||||
|
||||
- name: Update value of multiple existing label
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2222
|
||||
label3: value3333
|
||||
register: output_update_labels_2
|
||||
|
||||
- name: Update value of multiple existing label (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2222
|
||||
label3: value3333
|
||||
register: output_update_labels_3
|
||||
|
||||
- name: Update value of multiple existing label (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label2: value2222
|
||||
label3: value3333
|
||||
check_mode: yes
|
||||
register: output_update_labels_4
|
||||
|
||||
- name: assert updating multiple labels assigned to swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_update_labels_1 is changed'
|
||||
- 'output_update_labels_2 is changed'
|
||||
- 'output_update_labels_3 is not changed'
|
||||
- 'output_update_labels_4 is not changed'
|
||||
- 'output_update_labels_2.node.Spec.Labels | length == 6'
|
||||
- 'output_update_labels_2.node.Spec.Labels.label1 == "value1111"'
|
||||
- 'output_update_labels_2.node.Spec.Labels.label3 == "value3333"'
|
||||
- 'output_update_labels_2.node.Spec.Labels.label5 == "value5"'
|
||||
|
||||
####################################################################
|
||||
## Remove single label #############################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to remove single existing label from swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label1
|
||||
check_mode: yes
|
||||
register: output_remove_label_1
|
||||
|
||||
- name: Try to remove single existing label from swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label1
|
||||
register: output_remove_label_2
|
||||
|
||||
- name: Try to remove single existing label from swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label1
|
||||
register: output_remove_label_3
|
||||
|
||||
- name: Try to remove single existing label from swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label1
|
||||
check_mode: yes
|
||||
register: output_remove_label_4
|
||||
|
||||
- name: assert removing single label from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_remove_label_1 is changed'
|
||||
- 'output_remove_label_2 is changed'
|
||||
- 'output_remove_label_3 is not changed'
|
||||
- 'output_remove_label_4 is not changed'
|
||||
- 'output_remove_label_2.node.Spec.Labels | length == 5'
|
||||
- '"label1" not in output_remove_label_2.node.Spec.Labels'
|
||||
- 'output_remove_label_2.node.Spec.Labels.label3 == "value3333"'
|
||||
- 'output_remove_label_2.node.Spec.Labels.label5 == "value5"'
|
||||
|
||||
|
||||
####################################################################
|
||||
## Remove single not assigned to swarm label #######################
|
||||
####################################################################
|
||||
|
||||
- name: Try to remove single non-existing label from swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- labelnotexist
|
||||
check_mode: yes
|
||||
register: output_remove_nonexist_label_1
|
||||
|
||||
- name: Try to remove single non-existing label from swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- labelnotexist
|
||||
register: output_remove_nonexist_label_2
|
||||
|
||||
- name: Try to remove single non-existing label from swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- labelnotexist
|
||||
register: output_remove_nonexist_label_3
|
||||
|
||||
- name: Try to remove single non-existing label from swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- labelnotexist
|
||||
check_mode: yes
|
||||
register: output_remove_nonexist_label_4
|
||||
|
||||
- name: assert removing single non-existing label from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_remove_nonexist_label_1 is not changed'
|
||||
- 'output_remove_nonexist_label_2 is not changed'
|
||||
- 'output_remove_nonexist_label_3 is not changed'
|
||||
- 'output_remove_nonexist_label_4 is not changed'
|
||||
- 'output_remove_nonexist_label_2.node.Spec.Labels | length == 5'
|
||||
- '"label1" not in output_remove_nonexist_label_2.node.Spec.Labels'
|
||||
- 'output_remove_nonexist_label_2.node.Spec.Labels.label3 == "value3333"'
|
||||
- 'output_remove_nonexist_label_2.node.Spec.Labels.label5 == "value5"'
|
||||
|
||||
####################################################################
|
||||
## Remove multiple labels ##########################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to remove two existing labels from swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label2
|
||||
- label3
|
||||
check_mode: yes
|
||||
register: output_remove_label_1
|
||||
|
||||
- name: Try to remove two existing labels from swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label2
|
||||
- label3
|
||||
register: output_remove_label_2
|
||||
|
||||
- name: Try to remove two existing labels from swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label2
|
||||
- label3
|
||||
register: output_remove_label_3
|
||||
|
||||
- name: Try to remove two existing labels from swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label2
|
||||
- label3
|
||||
check_mode: yes
|
||||
register: output_remove_label_4
|
||||
|
||||
- name: assert removing multiple labels from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_remove_label_1 is changed'
|
||||
- 'output_remove_label_2 is changed'
|
||||
- 'output_remove_label_3 is not changed'
|
||||
- 'output_remove_label_4 is not changed'
|
||||
- 'output_remove_label_2.node.Spec.Labels | length == 3'
|
||||
- '"label1" not in output_remove_label_2.node.Spec.Labels'
|
||||
- '"label2" not in output_remove_label_2.node.Spec.Labels'
|
||||
- 'output_remove_label_2.node.Spec.Labels.label5 == "value5"'
|
||||
|
||||
####################################################################
|
||||
## Remove multiple labels, mix assigned and not assigned ##########
|
||||
####################################################################
|
||||
|
||||
- name: Try to remove mix of existing and non-existing labels from swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label4
|
||||
- labelisnotthere
|
||||
check_mode: yes
|
||||
register: output_remove_mix_labels_1
|
||||
|
||||
- name: Try to remove mix of existing and non-existing labels from swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label4
|
||||
- labelisnotthere
|
||||
register: output_remove_mix_labels_2
|
||||
|
||||
- name: Try to remove mix of existing and non-existing labels from swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label4
|
||||
- labelisnotthere
|
||||
register: output_remove_mix_labels_3
|
||||
|
||||
- name: Try to remove mix of existing and non-existing labels from swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_to_remove:
|
||||
- label4
|
||||
- labelisnotthere
|
||||
check_mode: yes
|
||||
register: output_remove_mix_labels_4
|
||||
|
||||
- name: assert removing mix of existing and non-existing labels from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_remove_mix_labels_1 is changed'
|
||||
- 'output_remove_mix_labels_2 is changed'
|
||||
- 'output_remove_mix_labels_3 is not changed'
|
||||
- 'output_remove_mix_labels_4 is not changed'
|
||||
- 'output_remove_mix_labels_2.node.Spec.Labels | length == 2'
|
||||
- '"label1" not in output_remove_mix_labels_2.node.Spec.Labels'
|
||||
- '"label4" not in output_remove_mix_labels_2.node.Spec.Labels'
|
||||
- 'output_remove_mix_labels_2.node.Spec.Labels.label5 == "value5"'
|
||||
|
||||
####################################################################
|
||||
## Add and remove labels ###########################################
|
||||
####################################################################
|
||||
|
||||
- name: Try to add and remove nonoverlapping labels at the same time (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label7: value7
|
||||
label8: value8
|
||||
labels_to_remove:
|
||||
- label5
|
||||
check_mode: yes
|
||||
register: output_add_del_labels_1
|
||||
|
||||
- name: Try to add and remove nonoverlapping labels at the same time
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label7: value7
|
||||
label8: value8
|
||||
labels_to_remove:
|
||||
- label5
|
||||
register: output_add_del_labels_2
|
||||
|
||||
- name: Try to add and remove nonoverlapping labels at the same time (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label7: value7
|
||||
label8: value8
|
||||
labels_to_remove:
|
||||
- label5
|
||||
register: output_add_del_labels_3
|
||||
|
||||
- name: Try to add and remove nonoverlapping labels at the same time (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label7: value7
|
||||
label8: value8
|
||||
labels_to_remove:
|
||||
- label5
|
||||
check_mode: yes
|
||||
register: output_add_del_labels_4
|
||||
|
||||
- name: assert adding and removing nonoverlapping labels from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_add_del_labels_1 is changed'
|
||||
- 'output_add_del_labels_2 is changed'
|
||||
- 'output_add_del_labels_3 is not changed'
|
||||
- 'output_add_del_labels_4 is not changed'
|
||||
- 'output_add_del_labels_2.node.Spec.Labels | length == 3'
|
||||
- '"label5" not in output_add_del_labels_2.node.Spec.Labels'
|
||||
- 'output_add_del_labels_2.node.Spec.Labels.label8 == "value8"'
|
||||
|
||||
####################################################################
|
||||
## Add and remove labels with label in both lists ##################
|
||||
####################################################################
|
||||
|
||||
- name: Try to add or update and remove overlapping labels at the same time (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label22: value22
|
||||
label6: value6666
|
||||
labels_to_remove:
|
||||
- label6
|
||||
- label7
|
||||
check_mode: yes
|
||||
register: output_add_del_overlap_lables_1
|
||||
|
||||
- name: Try to add or update and remove overlapping labels at the same time
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label22: value22
|
||||
label6: value6666
|
||||
labels_to_remove:
|
||||
- label6
|
||||
- label7
|
||||
register: output_add_del_overlap_lables_2
|
||||
|
||||
- name: Try to add or update and remove overlapping labels at the same time (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label22: value22
|
||||
label6: value6666
|
||||
labels_to_remove:
|
||||
- label6
|
||||
- label7
|
||||
register: output_add_del_overlap_lables_3
|
||||
|
||||
- name: Try to add or update and remove overlapping labels at the same time (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label22: value22
|
||||
label6: value6666
|
||||
labels_to_remove:
|
||||
- label6
|
||||
- label7
|
||||
check_mode: yes
|
||||
register: output_add_del_overlap_lables_4
|
||||
|
||||
- name: assert adding or updating and removing overlapping labels from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_add_del_overlap_lables_1 is changed'
|
||||
- 'output_add_del_overlap_lables_2 is changed'
|
||||
- 'output_add_del_overlap_lables_3 is not changed'
|
||||
- 'output_add_del_overlap_lables_4 is not changed'
|
||||
- 'output_add_del_overlap_lables_2.node.Spec.Labels | length == 3'
|
||||
- '"label7" not in output_add_del_overlap_lables_2.node.Spec.Labels'
|
||||
- 'output_add_del_overlap_lables_2.node.Spec.Labels.label6 == "value6666"'
|
||||
- 'output_add_del_overlap_lables_2.node.Spec.Labels.label22 == "value22"'
|
||||
|
||||
####################################################################
|
||||
## Replace labels #############################################
|
||||
####################################################################
|
||||
|
||||
- name: Replace labels on swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label11: value11
|
||||
label12: value12
|
||||
labels_state: replace
|
||||
check_mode: yes
|
||||
register: output_replace_labels_1
|
||||
|
||||
- name: Replace labels on swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label11: value11
|
||||
label12: value12
|
||||
labels_state: replace
|
||||
register: output_replace_labels_2
|
||||
|
||||
- name: Replace labels on swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label11: value11
|
||||
label12: value12
|
||||
labels_state: replace
|
||||
register: output_replace_labels_3
|
||||
|
||||
- name: Replace labels on swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels:
|
||||
label11: value11
|
||||
label12: value12
|
||||
labels_state: replace
|
||||
check_mode: yes
|
||||
register: output_replace_labels_4
|
||||
|
||||
- name: assert replacing labels from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_replace_labels_1 is changed'
|
||||
- 'output_replace_labels_2 is changed'
|
||||
- 'output_replace_labels_3 is not changed'
|
||||
- 'output_replace_labels_4 is not changed'
|
||||
- 'output_replace_labels_2.node.Spec.Labels | length == 2'
|
||||
- '"label6" not in output_replace_labels_2.node.Spec.Labels'
|
||||
- 'output_replace_labels_2.node.Spec.Labels.label12 == "value12"'
|
||||
|
||||
####################################################################
|
||||
## Remove all labels #############################################
|
||||
####################################################################
|
||||
|
||||
- name: Remove all labels from swarm node (check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_state: replace
|
||||
check_mode: yes
|
||||
register: output_remove_labels_1
|
||||
|
||||
- name: Remove all labels from swarm node
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_state: replace
|
||||
register: output_remove_labels_2
|
||||
|
||||
- name: Remove all labels from swarm node (idempotent)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_state: replace
|
||||
register: output_remove_labels_3
|
||||
|
||||
- name: Remove all labels from swarm node (idempotent check)
|
||||
docker_node:
|
||||
hostname: "{{ nodeid }}"
|
||||
labels_state: replace
|
||||
check_mode: yes
|
||||
register: output_remove_labels_4
|
||||
|
||||
- name: assert removing all labels from swarm node
|
||||
assert:
|
||||
that:
|
||||
- 'output_remove_labels_1 is changed'
|
||||
- 'output_remove_labels_2 is changed'
|
||||
- 'output_remove_labels_3 is not changed'
|
||||
- 'output_remove_labels_4 is not changed'
|
||||
- 'output_remove_labels_2.node.Spec.Labels | length == 0'
|
||||
|
||||
always:
|
||||
- name: Cleanup
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
8
tests/integration/targets/docker_node_info/aliases
Normal file
8
tests/integration/targets/docker_node_info/aliases
Normal file
@ -0,0 +1,8 @@
|
||||
shippable/posix/group3
|
||||
skip/aix
|
||||
skip/osx
|
||||
skip/freebsd
|
||||
destructive
|
||||
skip/docker # The tests sometimes make docker daemon unstable; hence,
|
||||
# we skip all docker-based CI runs to avoid disrupting
|
||||
# the whole CI system.
|
||||
3
tests/integration/targets/docker_node_info/meta/main.yml
Normal file
3
tests/integration/targets/docker_node_info/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
@ -0,0 +1,6 @@
|
||||
- include_tasks: test_node_info.yml
|
||||
# Maximum of 1.24 (docker API version for docker_node_info) and 1.25 (docker API version for docker_swarm) is 1.25
|
||||
when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')
|
||||
|
||||
- fail: msg="Too old docker / docker-py version to run docker_node_info tests!"
|
||||
when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
|
||||
@ -0,0 +1,88 @@
|
||||
---
|
||||
- block:
|
||||
- name: Make sure we're not already using Docker swarm
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
|
||||
- name: Try to get docker_node_info when docker is not running in swarm mode
|
||||
docker_node_info:
|
||||
ignore_errors: yes
|
||||
register: output
|
||||
|
||||
- name: assert failure when called when swarm is not in use or not run on manager node
|
||||
assert:
|
||||
that:
|
||||
- 'output is failed'
|
||||
- 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
|
||||
|
||||
- name: Create a Swarm cluster
|
||||
docker_swarm:
|
||||
state: present
|
||||
advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
|
||||
register: output
|
||||
|
||||
- name: assert changed when create a new swarm cluster
|
||||
assert:
|
||||
that:
|
||||
- 'output is changed'
|
||||
- 'output.actions[0] | regex_search("New Swarm cluster created: ")'
|
||||
- 'output.swarm_facts.JoinTokens.Manager'
|
||||
- 'output.swarm_facts.JoinTokens.Worker'
|
||||
|
||||
- name: Try to get docker_node_info when docker is running in swarm mode and as manager
|
||||
docker_node_info:
|
||||
register: output
|
||||
|
||||
- name: assert reading docker swarm node facts
|
||||
assert:
|
||||
that:
|
||||
- 'output.nodes | length > 0'
|
||||
- 'output.nodes[0].ID is string'
|
||||
|
||||
- name: Try to get docker_node_info using the self parameter
|
||||
docker_node_info:
|
||||
self: yes
|
||||
register: output
|
||||
|
||||
- name: assert reading swarm facts with list of nodes option
|
||||
assert:
|
||||
that:
|
||||
- 'output.nodes | length == 1'
|
||||
- 'output.nodes[0].ID is string'
|
||||
|
||||
- name: Get local docker node name
|
||||
set_fact:
|
||||
localnodename: "{{ output.nodes[0].Description.Hostname }}"
|
||||
|
||||
|
||||
- name: Try to get docker_node_info using the local node name as parameter
|
||||
docker_node_info:
|
||||
name: "{{ localnodename }}"
|
||||
register: output
|
||||
|
||||
- name: assert reading reading swarm facts and using node filter (random node name)
|
||||
assert:
|
||||
that:
|
||||
- 'output.nodes | length == 1'
|
||||
- 'output.nodes[0].ID is string'
|
||||
|
||||
- name: Create random name
|
||||
set_fact:
|
||||
randomnodename: "{{ 'node-%0x' % ((2**32) | random) }}"
|
||||
|
||||
- name: Try to get docker_node_info using random node name as parameter
|
||||
docker_node_info:
|
||||
name: "{{ randomnodename }}"
|
||||
register: output
|
||||
|
||||
- name: assert reading reading swarm facts and using node filter (random node name)
|
||||
assert:
|
||||
that:
|
||||
- 'output.nodes | length == 0'
|
||||
|
||||
always:
|
||||
- name: Cleanup
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user