#!/usr/bin/python
#
# Copyright (c) 2017, Dario Zanzico (git@dariozanzico.com)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
|
|
module: docker_swarm_service
|
|
author:
|
|
- "Dario Zanzico (@dariko)"
|
|
- "Jason Witkowski (@jwitko)"
|
|
- "Hannes Ljungberg (@hannseman)"
|
|
- "Piotr Wojciechowski (@wojciechowskipiotr)"
|
|
short_description: docker swarm service
|
|
description:
|
|
- Manages docker services through a swarm manager node.
|
|
- This module does not support updating services in a stack.
|
|
extends_documentation_fragment:
|
|
- community.docker._docker
|
|
- community.docker._docker.docker_py_2_documentation
|
|
- community.docker._attributes
|
|
- community.docker._attributes.actiongroup_docker
|
|
|
|
attributes:
|
|
check_mode:
|
|
support: full
|
|
diff_mode:
|
|
support: full
|
|
idempotent:
|
|
support: full
|
|
|
|
options:
|
|
args:
|
|
description:
|
|
- List arguments to be passed to the container.
|
|
- Corresponds to the C(ARG) parameter of C(docker service create).
|
|
type: list
|
|
elements: str
|
|
command:
|
|
description:
|
|
- Command to execute when the container starts.
|
|
- A command may be either a string or a list of strings.
|
|
- Corresponds to the C(COMMAND) parameter of C(docker service create).
|
|
type: raw
|
|
configs:
|
|
description:
|
|
- List of dictionaries describing the service configs.
|
|
- Corresponds to the C(--config) option of C(docker service create).
|
|
- Requires API version >= 1.30.
|
|
type: list
|
|
elements: dict
|
|
suboptions:
|
|
config_id:
|
|
description:
|
|
- Config's ID.
|
|
type: str
|
|
config_name:
|
|
description:
|
|
- Config's name as defined at its creation.
|
|
type: str
|
|
required: true
|
|
filename:
|
|
description:
|
|
- Name of the file containing the config. Defaults to the O(configs[].config_name) if not specified.
|
|
type: str
|
|
uid:
|
|
description:
|
|
- UID of the config file's owner.
|
|
type: str
|
|
gid:
|
|
description:
|
|
- GID of the config file's group.
|
|
type: str
|
|
mode:
|
|
description:
|
|
- File access mode inside the container. Must be an octal number (like V(0644) or V(0444)).
|
|
type: int
|
|
container_labels:
|
|
description:
|
|
- Dictionary of key value pairs.
|
|
- Corresponds to the C(--container-label) option of C(docker service create).
|
|
type: dict
|
|
sysctls:
|
|
description:
|
|
- Dictionary of key, value pairs.
|
|
version_added: 3.10.0
|
|
type: dict
|
|
dns:
|
|
description:
|
|
- List of custom DNS servers.
|
|
- Corresponds to the C(--dns) option of C(docker service create).
|
|
type: list
|
|
elements: str
|
|
dns_search:
|
|
description:
|
|
- List of custom DNS search domains.
|
|
- Corresponds to the C(--dns-search) option of C(docker service create).
|
|
type: list
|
|
elements: str
|
|
dns_options:
|
|
description:
|
|
- List of custom DNS options.
|
|
- Corresponds to the C(--dns-option) option of C(docker service create).
|
|
type: list
|
|
elements: str
|
|
endpoint_mode:
|
|
description:
|
|
- Service endpoint mode.
|
|
- Corresponds to the C(--endpoint-mode) option of C(docker service create).
|
|
type: str
|
|
choices:
|
|
- vip
|
|
- dnsrr
|
|
env:
|
|
description:
|
|
- List or dictionary of the service environment variables.
|
|
- If passed a list, each item needs to be in the format C(KEY=VALUE).
|
|
- If passed a dictionary, values which might be parsed as numbers, booleans or other types by the YAML parser must be
  quoted (for example V("true")) in order to avoid data loss.
|
|
- Corresponds to the C(--env) option of C(docker service create).
|
|
type: raw
|
|
env_files:
|
|
description:
|
|
- List of paths to files, present on the target, containing environment variables C(FOO=BAR).
|
|
- The order of the list is significant in determining the value assigned to a variable that shows up more than once.
|
|
- If a variable is also present in O(env), the O(env) value will take precedence.
|
|
type: list
|
|
elements: path
|
|
force_update:
|
|
description:
|
|
- Force update even if no changes require it.
|
|
- Corresponds to the C(--force) option of C(docker service update).
|
|
type: bool
|
|
default: false
|
|
groups:
|
|
description:
|
|
- List of additional group names and/or IDs that the container process will run as.
|
|
- Corresponds to the C(--group) option of C(docker service update).
|
|
type: list
|
|
elements: str
|
|
healthcheck:
|
|
description:
|
|
- Configure a check that is run to determine whether or not containers for this service are "healthy". See the docs
|
|
for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) for details
|
|
on how healthchecks work.
|
|
- 'O(healthcheck.interval), O(healthcheck.timeout), and O(healthcheck.start_period) are specified as durations. They
  accept a duration as a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are
  V(us), V(ms), V(s), V(m) and V(h).'
|
|
type: dict
|
|
suboptions:
|
|
test:
|
|
description:
|
|
- Command to run to check health.
|
|
- Must be either a string or a list. If it is a list, the first item must be one of V(NONE), V(CMD) or V(CMD-SHELL).
|
|
type: raw
|
|
interval:
|
|
description:
|
|
- Time between running the check.
|
|
type: str
|
|
timeout:
|
|
description:
|
|
- Maximum time to allow one check to run.
|
|
type: str
|
|
retries:
|
|
description:
|
|
- Consecutive failures needed to report unhealthy. Accepts an integer value.
|
|
type: int
|
|
start_period:
|
|
description:
|
|
- Start period for the container to initialize before starting health-retries countdown.
|
|
type: str
|
|
hostname:
|
|
description:
|
|
- Container hostname.
|
|
- Corresponds to the C(--hostname) option of C(docker service create).
|
|
type: str
|
|
hosts:
|
|
description:
|
|
- Dict of host-to-IP mappings, where each host name is a key in the dictionary. Each host name will be added to the
|
|
container's /etc/hosts file.
|
|
- Corresponds to the C(--host) option of C(docker service create).
|
|
type: dict
|
|
image:
|
|
description:
|
|
- Service image path and tag.
|
|
- Corresponds to the C(IMAGE) parameter of C(docker service create).
|
|
type: str
|
|
init:
|
|
description:
|
|
- Use an init inside each service container to forward signals and reap processes.
|
|
- Corresponds to the C(--init) option of C(docker service create).
|
|
- Requires API version >= 1.37.
|
|
type: bool
|
|
labels:
|
|
description:
|
|
- Dictionary of key value pairs.
|
|
- Corresponds to the C(--label) option of C(docker service create).
|
|
type: dict
|
|
limits:
|
|
description:
|
|
- Configures service resource limits.
|
|
suboptions:
|
|
cpus:
|
|
description:
|
|
- Service CPU limit. V(0) equals no limit.
|
|
- Corresponds to the C(--limit-cpu) option of C(docker service create).
|
|
type: float
|
|
memory:
|
|
description:
|
|
- Service memory limit in format C(<number>[<unit>]). Number is a positive integer. Unit can be V(B) (byte), V(K)
|
|
(kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), V(T) (tebibyte), or V(P) (pebibyte).
|
|
- V(0) equals no limit.
|
|
- Omitting the unit defaults to bytes.
|
|
- Corresponds to the C(--limit-memory) option of C(docker service create).
|
|
type: str
|
|
type: dict
|
|
logging:
|
|
description:
|
|
- Logging configuration for the service.
|
|
suboptions:
|
|
driver:
|
|
description:
|
|
- Configure the logging driver for a service.
|
|
- Corresponds to the C(--log-driver) option of C(docker service create).
|
|
type: str
|
|
options:
|
|
description:
|
|
- Options for service logging driver.
|
|
- Corresponds to the C(--log-opt) option of C(docker service create).
|
|
type: dict
|
|
type: dict
|
|
mode:
|
|
description:
|
|
- Service replication mode.
|
|
- Service will be removed and recreated when changed.
|
|
- Corresponds to the C(--mode) option of C(docker service create).
|
|
- The value V(replicated-job) was added in community.docker 4.7.0, and requires API version >= 1.41 and Docker SDK for Python >= 6.0.0.
|
|
type: str
|
|
default: replicated
|
|
choices:
|
|
- replicated
|
|
- global
|
|
- replicated-job
|
|
mounts:
|
|
description:
|
|
- List of dictionaries describing the service mounts.
|
|
- Corresponds to the C(--mount) option of C(docker service create).
|
|
type: list
|
|
elements: dict
|
|
suboptions:
|
|
source:
|
|
description:
|
|
- Mount source (for example a volume name or a host path).
|
|
- Must be specified if O(mounts[].type) is not V(tmpfs).
|
|
type: str
|
|
target:
|
|
description:
|
|
- Container path.
|
|
type: str
|
|
required: true
|
|
type:
|
|
description:
|
|
- The mount type.
|
|
- Note that V(npipe) is only supported by Docker for Windows. Also note that V(npipe) was added in Ansible 2.9.
|
|
type: str
|
|
default: bind
|
|
choices:
|
|
- bind
|
|
- volume
|
|
- tmpfs
|
|
- npipe
|
|
readonly:
|
|
description:
|
|
- Whether the mount should be read-only.
|
|
type: bool
|
|
labels:
|
|
description:
|
|
- Volume labels to apply.
|
|
type: dict
|
|
propagation:
|
|
description:
|
|
- The propagation mode to use.
|
|
- Can only be used when O(mounts[].type=bind).
|
|
type: str
|
|
choices:
|
|
- shared
|
|
- slave
|
|
- private
|
|
- rshared
|
|
- rslave
|
|
- rprivate
|
|
no_copy:
|
|
description:
|
|
- Disable copying of data from a container when a volume is created.
|
|
- Can only be used when O(mounts[].type=volume).
|
|
type: bool
|
|
driver_config:
|
|
description:
|
|
- Volume driver configuration.
|
|
- Can only be used when O(mounts[].type=volume).
|
|
suboptions:
|
|
name:
|
|
description:
|
|
- Name of the volume-driver plugin to use for the volume.
|
|
type: str
|
|
options:
|
|
description:
|
|
- Options as key-value pairs to pass to the driver for this volume.
|
|
type: dict
|
|
type: dict
|
|
tmpfs_size:
|
|
description:
|
|
- Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer. Unit can be V(B) (byte),
|
|
V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), V(T) (tebibyte), or V(P) (pebibyte).
|
|
- Can only be used when O(mounts[].type=tmpfs).
|
|
type: str
|
|
tmpfs_mode:
|
|
description:
|
|
- File mode of the tmpfs in octal.
|
|
- Can only be used when O(mounts[].type=tmpfs).
|
|
type: int
|
|
name:
|
|
description:
|
|
- Service name.
|
|
- Corresponds to the C(--name) option of C(docker service create).
|
|
type: str
|
|
required: true
|
|
networks:
|
|
description:
|
|
- List of the service networks names or dictionaries.
|
|
- When passed as dictionaries, the valid sub-options are C(name), which is required, C(aliases), and C(options).
|
|
- Prior to API version 1.29, updating and removing networks is not supported. If changes are made, the service will
  be removed and recreated.
|
|
- Corresponds to the C(--network) option of C(docker service create).
|
|
type: list
|
|
elements: raw
|
|
placement:
|
|
description:
|
|
- Configures service placement preferences and constraints.
|
|
suboptions:
|
|
constraints:
|
|
description:
|
|
- List of the service constraints.
|
|
- Corresponds to the C(--constraint) option of C(docker service create).
|
|
type: list
|
|
elements: str
|
|
preferences:
|
|
description:
|
|
- List of the placement preferences as key value pairs.
|
|
- Corresponds to the C(--placement-pref) option of C(docker service create).
|
|
- Requires API version >= 1.27.
|
|
type: list
|
|
elements: dict
|
|
replicas_max_per_node:
|
|
description:
|
|
- Maximum number of tasks per node.
|
|
- Corresponds to the C(--replicas-max-per-node) option of C(docker service create).
|
|
- Requires API version >= 1.40.
|
|
type: int
|
|
version_added: 1.3.0
|
|
type: dict
|
|
publish:
|
|
description:
|
|
- List of dictionaries describing the service published ports.
|
|
- Corresponds to the C(--publish) option of C(docker service create).
|
|
type: list
|
|
elements: dict
|
|
suboptions:
|
|
published_port:
|
|
description:
|
|
- The port to make externally available.
|
|
type: int
|
|
required: false
|
|
target_port:
|
|
description:
|
|
- The port inside the container to expose.
|
|
type: int
|
|
required: true
|
|
protocol:
|
|
description:
|
|
- What protocol to use.
|
|
type: str
|
|
default: tcp
|
|
choices:
|
|
- tcp
|
|
- udp
|
|
mode:
|
|
description:
|
|
- What publish mode to use.
|
|
- Requires API version >= 1.32.
|
|
type: str
|
|
choices:
|
|
- ingress
|
|
- host
|
|
read_only:
|
|
description:
|
|
- Mount the container's root filesystem as read-only.
|
|
- Corresponds to the C(--read-only) option of C(docker service create).
|
|
type: bool
|
|
replicas:
|
|
description:
|
|
- Number of containers instantiated in the service. Valid only if O(mode=replicated) or O(mode=replicated-job).
|
|
- If set to V(-1), and service is not present, service replicas will be set to V(1).
|
|
- If set to V(-1), and service is present, service replicas will be unchanged.
|
|
- Corresponds to the C(--replicas) option of C(docker service create).
|
|
type: int
|
|
default: -1
|
|
reservations:
|
|
description:
|
|
- Configures service resource reservations.
|
|
suboptions:
|
|
cpus:
|
|
description:
|
|
- Service CPU reservation. V(0) equals no reservation.
|
|
- Corresponds to the C(--reserve-cpu) option of C(docker service create).
|
|
type: float
|
|
memory:
|
|
description:
|
|
- Service memory reservation in format C(<number>[<unit>]). Number is a positive integer. Unit can be V(B) (byte),
|
|
V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), V(T) (tebibyte), or V(P) (pebibyte).
|
|
- V(0) equals no reservation.
|
|
- Omitting the unit defaults to bytes.
|
|
- Corresponds to the C(--reserve-memory) option of C(docker service create).
|
|
type: str
|
|
type: dict
|
|
resolve_image:
|
|
description:
|
|
- Whether the current image digest should be resolved from the registry and updated if changed.
|
|
- Requires API version >= 1.30.
|
|
type: bool
|
|
default: false
|
|
restart_config:
|
|
description:
|
|
- Configures if and how to restart containers when they exit.
|
|
suboptions:
|
|
condition:
|
|
description:
|
|
- Restart condition of the service.
|
|
- Corresponds to the C(--restart-condition) option of C(docker service create).
|
|
type: str
|
|
choices:
|
|
- none
|
|
- on-failure
|
|
- any
|
|
delay:
|
|
description:
|
|
- Delay between restarts.
|
|
- 'Accepts a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are V(us), V(ms),
  V(s), V(m) and V(h).'
|
|
- Corresponds to the C(--restart-delay) option of C(docker service create).
|
|
type: str
|
|
max_attempts:
|
|
description:
|
|
- Maximum number of service restarts.
|
|
- Corresponds to the C(--restart-max-attempts) option of C(docker service create).
|
|
type: int
|
|
window:
|
|
description:
|
|
- Restart policy evaluation window.
|
|
- 'Accepts a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are V(us), V(ms),
  V(s), V(m) and V(h).'
|
|
- Corresponds to the C(--restart-window) option of C(docker service create).
|
|
type: str
|
|
type: dict
|
|
rollback_config:
|
|
description:
|
|
- Configures how the service should be rolled back in case of a failing update.
|
|
suboptions:
|
|
parallelism:
|
|
description:
|
|
- The number of containers to roll back at a time. If set to 0, all containers roll back simultaneously.
|
|
- Corresponds to the C(--rollback-parallelism) option of C(docker service create).
|
|
- Requires API version >= 1.28.
|
|
type: int
|
|
delay:
|
|
description:
|
|
- Delay between task rollbacks.
|
|
- 'Accepts a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are V(us), V(ms),
  V(s), V(m) and V(h).'
|
|
- Corresponds to the C(--rollback-delay) option of C(docker service create).
|
|
- Requires API version >= 1.28.
|
|
type: str
|
|
failure_action:
|
|
description:
|
|
- Action to take in case of rollback failure.
|
|
- Corresponds to the C(--rollback-failure-action) option of C(docker service create).
|
|
- Requires API version >= 1.28.
|
|
type: str
|
|
choices:
|
|
- continue
|
|
- pause
|
|
monitor:
|
|
description:
|
|
- Duration after each task rollback to monitor for failure.
|
|
- 'Accepts a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are V(us), V(ms),
  V(s), V(m) and V(h).'
|
|
- Corresponds to the C(--rollback-monitor) option of C(docker service create).
|
|
- Requires API version >= 1.28.
|
|
type: str
|
|
max_failure_ratio:
|
|
description:
|
|
- Fraction of tasks that may fail during a rollback.
|
|
- Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
|
|
- Requires API version >= 1.28.
|
|
type: float
|
|
order:
|
|
description:
|
|
- Specifies the order of operations during rollbacks.
|
|
- Corresponds to the C(--rollback-order) option of C(docker service create).
|
|
- Requires API version >= 1.29.
|
|
type: str
|
|
type: dict
|
|
secrets:
|
|
description:
|
|
- List of dictionaries describing the service secrets.
|
|
- Corresponds to the C(--secret) option of C(docker service create).
|
|
type: list
|
|
elements: dict
|
|
suboptions:
|
|
secret_id:
|
|
description:
|
|
- Secret's ID.
|
|
type: str
|
|
secret_name:
|
|
description:
|
|
- Secret's name as defined at its creation.
|
|
type: str
|
|
required: true
|
|
filename:
|
|
description:
|
|
- Name of the file containing the secret. Defaults to the O(secrets[].secret_name) if not specified.
|
|
- Corresponds to the C(target) key of C(docker service create --secret).
|
|
type: str
|
|
uid:
|
|
description:
|
|
- UID of the secret file's owner.
|
|
type: str
|
|
gid:
|
|
description:
|
|
- GID of the secret file's group.
|
|
type: str
|
|
mode:
|
|
description:
|
|
- File access mode inside the container. Must be an octal number (like V(0644) or V(0444)).
|
|
type: int
|
|
state:
|
|
description:
|
|
- V(absent) - A service matching the specified name will be removed and have its tasks stopped.
|
|
- V(present) - Asserts the existence of a service matching the name and provided configuration parameters. Unspecified
|
|
configuration parameters will be set to docker defaults.
|
|
type: str
|
|
default: present
|
|
choices:
|
|
- present
|
|
- absent
|
|
stop_grace_period:
|
|
description:
|
|
- Time to wait before force killing a container.
|
|
- 'Accepts a duration as a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are V(us),
  V(ms), V(s), V(m) and V(h).'
|
|
- Corresponds to the C(--stop-grace-period) option of C(docker service create).
|
|
type: str
|
|
stop_signal:
|
|
description:
|
|
- Override default signal used to stop the container.
|
|
- Corresponds to the C(--stop-signal) option of C(docker service create).
|
|
type: str
|
|
tty:
|
|
description:
|
|
- Allocate a pseudo-TTY.
|
|
- Corresponds to the C(--tty) option of C(docker service create).
|
|
type: bool
|
|
update_config:
|
|
description:
|
|
- Configures how the service should be updated. Useful for configuring rolling updates.
|
|
suboptions:
|
|
parallelism:
|
|
description:
|
|
- Rolling update parallelism.
|
|
- Corresponds to the C(--update-parallelism) option of C(docker service create).
|
|
type: int
|
|
delay:
|
|
description:
|
|
- Rolling update delay.
|
|
- 'Accepts a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are V(us), V(ms),
  V(s), V(m) and V(h).'
|
|
- Corresponds to the C(--update-delay) option of C(docker service create).
|
|
type: str
|
|
failure_action:
|
|
description:
|
|
- Action to take in case of container failure.
|
|
- Corresponds to the C(--update-failure-action) option of C(docker service create).
|
|
- Usage of V(rollback) requires API version >= 1.29.
|
|
type: str
|
|
choices:
|
|
- continue
|
|
- pause
|
|
- rollback
|
|
monitor:
|
|
description:
|
|
- Time to monitor updated tasks for failures.
|
|
- 'Accepts a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. The supported units are V(us), V(ms),
  V(s), V(m) and V(h).'
|
|
- Corresponds to the C(--update-monitor) option of C(docker service create).
|
|
type: str
|
|
max_failure_ratio:
|
|
description:
|
|
- Fraction of tasks that may fail during an update before the failure action is invoked.
|
|
- Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
|
|
type: float
|
|
order:
|
|
description:
|
|
- Specifies the order of operations when rolling out an updated task.
|
|
- Corresponds to the C(--update-order) option of C(docker service create).
|
|
- Requires API version >= 1.29.
|
|
type: str
|
|
type: dict
|
|
user:
|
|
description:
|
|
- Sets the username or UID used for the specified command.
|
|
- Before Ansible 2.8, the default value for this option was V(root).
|
|
- The default has been removed so that the user defined in the image is used if no user is specified here.
|
|
- Corresponds to the C(--user) option of C(docker service create).
|
|
type: str
|
|
working_dir:
|
|
description:
|
|
- Path to the working directory.
|
|
- Corresponds to the C(--workdir) option of C(docker service create).
|
|
type: str
|
|
cap_add:
|
|
description:
|
|
- List of capabilities to add to the container.
|
|
- Requires API version >= 1.41.
|
|
type: list
|
|
elements: str
|
|
version_added: 2.2.0
|
|
cap_drop:
|
|
description:
|
|
- List of capabilities to drop from the container.
|
|
- Requires API version >= 1.41.
|
|
type: list
|
|
elements: str
|
|
version_added: 2.2.0
|
|
|
|
requirements:
|
|
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
|
|
- "Docker API >= 1.25"
|
|
notes:
|
|
- Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0. When using
  older versions, use O(force_update=true) to trigger the swarm to resolve a new image.
|
|
"""
|
|
|
|
RETURN = r"""
|
|
swarm_service:
|
|
returned: always
|
|
type: dict
|
|
description:
|
|
- Dictionary of variables representing the current state of the service. Matches the module parameters format.
|
|
- Note that facts are not part of registered vars but accessible directly.
|
|
- Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service), while the module actually
|
|
returned a variable called C(ansible_docker_service). The variable was renamed to RV(swarm_service) in both code and
|
|
documentation for Ansible 2.7.9 and Ansible 2.8.0. In Ansible 2.7.x, the old name C(ansible_docker_service) can still
|
|
be used.
|
|
sample: '{ "args": [ "3600" ], "cap_add": null, "cap_drop": [ "ALL" ], "command": [ "sleep" ], "configs": null, "constraints":
|
|
[ "node.role == manager", "engine.labels.operatingsystem == ubuntu 14.04" ], "container_labels": null, "sysctls": null,
|
|
"dns": null, "dns_options": null, "dns_search": null, "endpoint_mode": null, "env": [ "ENVVAR1=envvar1", "ENVVAR2=envvar2"
|
|
], "force_update": null, "groups": null, "healthcheck": { "interval": 90000000000, "retries": 3, "start_period": 30000000000,
|
|
"test": [ "CMD", "curl", "--fail", "http://nginx.host.com" ], "timeout": 10000000000 }, "healthcheck_disabled": false,
|
|
"hostname": null, "hosts": null, "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
|
|
"labels": { "com.example.department": "Finance", "com.example.description": "Accounting webapp" }, "limit_cpu": 0.5, "limit_memory":
|
|
52428800, "log_driver": "fluentd", "log_driver_options": { "fluentd-address": "127.0.0.1:24224", "fluentd-async-connect":
|
|
"true", "tag": "myservice" }, "mode": "replicated", "mounts": [ { "readonly": false, "source": "/tmp/", "target": "/remote_tmp/",
|
|
"type": "bind", "labels": null, "propagation": null, "no_copy": null, "driver_config": null, "tmpfs_size": null, "tmpfs_mode":
|
|
null } ], "networks": null, "placement_preferences": [ { "spread": "node.labels.mylabel" } ], "publish": null, "read_only":
|
|
null, "replicas": 1, "replicas_max_per_node": 1, "reserve_cpu": 0.25, "reserve_memory": 20971520, "restart_policy": "on-failure",
|
|
"restart_policy_attempts": 3, "restart_policy_delay": 5000000000, "restart_policy_window": 120000000000, "secrets": null,
|
|
"stop_grace_period": null, "stop_signal": null, "tty": null, "update_delay": 10000000000, "update_failure_action": null,
|
|
"update_max_failure_ratio": null, "update_monitor": null, "update_order": "stop-first", "update_parallelism": 2, "user":
|
|
null, "working_dir": null }'
|
|
changes:
|
|
returned: always
|
|
description:
|
|
- List of changed service attributes if a service has been altered, [] otherwise.
|
|
type: list
|
|
elements: str
|
|
sample: ['container_labels', 'replicas']
|
|
rebuilt:
|
|
returned: always
|
|
description:
|
|
- True if the service has been recreated (removed and created).
|
|
type: bool
|
|
sample: true
|
|
"""
|
|
|
|
EXAMPLES = r"""
|
|
---
|
|
- name: Set command and arguments
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
command: sleep
|
|
args:
|
|
- "3600"
|
|
|
|
- name: Set a bind mount
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
mounts:
|
|
- source: /tmp/
|
|
target: /remote_tmp/
|
|
type: bind
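
# A sketch of a named volume and a tmpfs mount using the documented mounts
# suboptions; the volume name "myvolume" and the sizes are placeholders.
- name: Set a named volume and a tmpfs mount
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    mounts:
      - source: myvolume
        target: /data/
        type: volume
        readonly: false
      - target: /scratch/
        type: tmpfs
        tmpfs_size: 64M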
|
|
|
|
- name: Set service labels
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
labels:
|
|
com.example.description: "Accounting webapp"
|
|
com.example.department: "Finance"
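
# Container labels and sysctls apply to the service's containers rather than
# to the service itself; the values below are illustrative only.
- name: Set container labels and sysctls
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    container_labels:
      com.example.tier: "backend"
    sysctls:
      net.core.somaxconn: "1024"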
|
|
|
|
- name: Set environment variables
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
env:
|
|
ENVVAR1: envvar1
|
|
ENVVAR2: envvar2
|
|
env_files:
|
|
- envs/common.env
|
|
- envs/apps/web.env
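
# A minimal sketch of the custom DNS options; the server addresses, search
# domain and resolver option are placeholders.
- name: Set custom DNS servers, search domains and options
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    dns:
      - 8.8.8.8
      - 1.1.1.1
    dns_search:
      - example.com
    dns_options:
      - timeout:3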
|
|
|
|
- name: Set fluentd logging
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
logging:
|
|
driver: fluentd
|
|
options:
|
|
fluentd-address: "127.0.0.1:24224"
|
|
fluentd-async-connect: "true"
|
|
tag: myservice
|
|
|
|
- name: Set restart policies
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
restart_config:
|
|
condition: on-failure
|
|
delay: 5s
|
|
max_attempts: 3
|
|
window: 120s
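
# Running one task per node with mode=global; note that changing the mode of
# an existing service causes it to be removed and recreated.
- name: Run the service globally (one task per node)
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    mode: global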
|
|
|
|
- name: Set update config
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
update_config:
|
|
parallelism: 2
|
|
delay: 10s
|
|
order: stop-first
|
|
|
|
- name: Set rollback config
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine
|
|
update_config:
|
|
failure_action: rollback
|
|
rollback_config:
|
|
parallelism: 2
|
|
delay: 10s
|
|
order: stop-first
|
|
|
|
- name: Set placement preferences
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine:edge
|
|
placement:
|
|
preferences:
|
|
- spread: node.labels.mylabel
|
|
constraints:
|
|
- node.role == manager
|
|
- engine.labels.operatingsystem == ubuntu 14.04
|
|
replicas_max_per_node: 2
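
# Scaling a replicated service; the replica count below is an arbitrary
# example value.
- name: Scale a replicated service
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    mode: replicated
    replicas: 3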
|
|
|
|
- name: Set configs
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine:edge
|
|
configs:
|
|
- config_name: myconfig_name
|
|
filename: "/tmp/config.txt"
|
|
|
|
- name: Set networks
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine:edge
|
|
networks:
|
|
- mynetwork
|
|
|
|
- name: Set networks as a dictionary
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine:edge
|
|
networks:
|
|
- name: "mynetwork"
|
|
aliases:
|
|
- "mynetwork_alias"
|
|
options:
|
|
foo: bar
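
# Hostname and extra /etc/hosts entries; the names and the IP address are
# placeholders.
- name: Set hostname and extra hosts entries
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    hostname: myservice.example.com
    hosts:
      db.example.com: 10.0.0.5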
|
|
|
|
- name: Set secrets
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine:edge
|
|
secrets:
|
|
- secret_name: mysecret_name
|
|
filename: "/run/secrets/secret.txt"
|
|
|
|
- name: Start service with healthcheck
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: nginx:1.13
|
|
healthcheck:
|
|
# Check if nginx server is healthy by curl'ing the server.
|
|
# If this fails or timeouts, the healthcheck fails.
|
|
test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
|
|
interval: 1m30s
|
|
timeout: 10s
|
|
retries: 3
|
|
start_period: 30s
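
# Capability management (requires API version >= 1.41); the capability names
# shown are only an example of a restrictive baseline.
- name: Drop all capabilities and add back NET_BIND_SERVICE
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    cap_drop:
      - ALL
    cap_add:
      - NET_BIND_SERVICE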
|
|
|
|
- name: Configure service resources
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
image: alpine:edge
|
|
reservations:
|
|
cpus: 0.25
|
|
memory: 20M
|
|
limits:
|
|
cpus: 0.50
|
|
memory: 50M
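
# A sketch combining several container-level options documented above; the
# signal, grace period and UID are illustrative values.
- name: Set stop behaviour, user and read-only root filesystem
  community.docker.docker_swarm_service:
    name: myservice
    image: alpine
    stop_signal: SIGTERM
    stop_grace_period: 30s
    user: "1000"
    read_only: true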
|
|
|
|
- name: Remove service
|
|
community.docker.docker_swarm_service:
|
|
name: myservice
|
|
state: absent
|
|
"""
|
|
|
|
import shlex
|
|
import time
|
|
import traceback
|
|
import typing as t
|
|
|
|
from ansible.module_utils.basic import human_to_bytes
|
|
from ansible.module_utils.common.text.converters import to_text
|
|
|
|
from ansible_collections.community.docker.plugins.module_utils._common import (
|
|
AnsibleDockerClient,
|
|
RequestException,
|
|
)
|
|
from ansible_collections.community.docker.plugins.module_utils._util import (
|
|
DifferenceTracker,
|
|
DockerBaseClass,
|
|
clean_dict_booleans_for_docker_api,
|
|
convert_duration_to_nanosecond,
|
|
parse_healthcheck,
|
|
sanitize_labels,
|
|
)
|
|
from ansible_collections.community.docker.plugins.module_utils._version import (
|
|
LooseVersion,
|
|
)
|
|
|
|
|
|
try:
|
|
from docker import types
|
|
from docker.errors import (
|
|
APIError,
|
|
DockerException,
|
|
NotFound,
|
|
)
|
|
from docker.utils import (
|
|
format_environment,
|
|
parse_env_file,
|
|
parse_repository_tag,
|
|
)
|
|
except ImportError:
|
|
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
|
pass
|
|
|
|
|
|
def get_docker_environment(
|
|
env: str | dict[str, t.Any] | list[t.Any] | None, env_files: list[str] | None
|
|
) -> list[str] | None:
|
|
"""
|
|
Will return a list of "KEY=VALUE" items. Supplied env variable can
|
|
be either a list or a dictionary.
|
|
|
|
If environment files are combined with explicit environment variables,
|
|
the explicit environment variables take precedence.
|
|
"""
|
|
env_dict: dict[str, str] = {}
|
|
if env_files:
|
|
for env_file in env_files:
|
|
parsed_env_file = parse_env_file(env_file)
|
|
for name, value in parsed_env_file.items():
|
|
env_dict[name] = str(value)
|
|
if env is not None and isinstance(env, str):
|
|
env = env.split(",")
|
|
if env is not None and isinstance(env, dict):
|
|
for name, value in env.items():
|
|
if not isinstance(value, str):
|
|
raise ValueError(
|
|
"Non-string value found for env option. Ambiguous env options must be "
|
|
"wrapped in quotes to avoid them being interpreted when directly specified "
|
|
"in YAML, or explicitly converted to strings when the option is templated. "
|
|
f"Key: {name}"
|
|
)
|
|
env_dict[name] = str(value)
|
|
elif env is not None and isinstance(env, list):
|
|
for item in env:
|
|
try:
|
|
name, value = item.split("=", 1)
|
|
except ValueError as exc:
|
|
raise ValueError(
|
|
"Invalid environment variable found in list, needs to be in format KEY=VALUE."
|
|
) from exc
|
|
env_dict[name] = value
|
|
elif env is not None:
|
|
raise ValueError(
|
|
f"Invalid type for env {env} ({type(env)}). Only list or dict allowed."
|
|
)
|
|
env_list = format_environment(env_dict)
|
|
if not env_list:
|
|
if env is not None or env_files is not None:
|
|
return []
|
|
return None
|
|
return sorted(env_list)
|
|
|
|
|
|
@t.overload
|
|
def get_docker_networks(
|
|
networks: list[str | dict[str, t.Any]], network_ids: dict[str, str]
|
|
) -> list[dict[str, t.Any]]: ...
|
|
|
|
|
|
@t.overload
|
|
def get_docker_networks(
|
|
networks: list[str | dict[str, t.Any]] | None, network_ids: dict[str, str]
|
|
) -> list[dict[str, t.Any]] | None: ...
|
|
|
|
|
|
def get_docker_networks(
|
|
networks: list[str | dict[str, t.Any]] | None, network_ids: dict[str, str]
|
|
) -> list[dict[str, t.Any]] | None:
|
|
"""
|
|
Validate a list of network names or a list of network dictionaries.
|
|
Network names will be resolved to ids by using the network_ids mapping.
|
|
"""
|
|
if networks is None:
|
|
return None
|
|
parsed_networks = []
|
|
for network in networks:
|
|
parsed_network: dict[str, t.Any]
|
|
if isinstance(network, str):
|
|
parsed_network = {"name": network}
|
|
elif isinstance(network, dict):
|
|
if "name" not in network:
|
|
raise TypeError(
|
|
'"name" is required when networks are passed as dictionaries.'
|
|
)
|
|
name = network.pop("name")
|
|
parsed_network = {"name": name}
|
|
aliases = network.pop("aliases", None)
|
|
if aliases is not None:
|
|
if not isinstance(aliases, list):
|
|
raise TypeError(
|
|
'"aliases" network option is only allowed as a list'
|
|
)
|
|
if not all(isinstance(alias, str) for alias in aliases):
|
|
raise TypeError("Only strings are allowed as network aliases.")
|
|
parsed_network["aliases"] = aliases
|
|
options = network.pop("options", None)
|
|
if options is not None:
|
|
if not isinstance(options, dict):
|
|
raise TypeError("Only dict is allowed as network options.")
|
|
parsed_network["options"] = clean_dict_booleans_for_docker_api(options)
|
|
# Check if any invalid keys left
|
|
if network:
|
|
invalid_keys = ", ".join(network.keys())
|
|
raise TypeError(
|
|
f"{invalid_keys} are not valid keys for the networks option"
|
|
)
|
|
|
|
else:
|
|
raise TypeError(
|
|
"Only a list of strings or dictionaries are allowed to be passed as networks."
|
|
)
|
|
network_name = parsed_network.pop("name")
|
|
try:
|
|
parsed_network["id"] = network_ids[network_name]
|
|
except KeyError as e:
|
|
raise ValueError(f"Could not find a network named: {e}.") from None
|
|
parsed_networks.append(parsed_network)
|
|
return parsed_networks or []
|
|
|
|
|
|
def get_nanoseconds_from_raw_option(name: str, value: t.Any) -> int | None:
|
|
if value is None:
|
|
return None
|
|
if isinstance(value, int):
|
|
return value
|
|
if isinstance(value, str):
|
|
try:
|
|
return int(value)
|
|
except ValueError:
|
|
return convert_duration_to_nanosecond(value)
|
|
raise ValueError(
|
|
f"Invalid type for {name} {value} ({type(value)}). Only string or int allowed."
|
|
)
|
|
|
|
|
|
def get_value(key: str, values: dict[str, t.Any], default: t.Any = None) -> t.Any:
|
|
value = values.get(key)
|
|
return value if value is not None else default
|
|
|
|
|
|
def has_dict_changed(
|
|
new_dict: dict[str, t.Any] | None, old_dict: dict[str, t.Any] | None
|
|
) -> bool:
|
|
"""
|
|
Check if new_dict has differences compared to old_dict while
|
|
ignoring keys in old_dict which are None in new_dict.
|
|
"""
|
|
if new_dict is None:
|
|
return False
|
|
if not new_dict and old_dict:
|
|
return True
|
|
if not old_dict and new_dict:
|
|
return True
|
|
if old_dict is None:
|
|
# in this case new_dict is empty, only the type checker didn't notice
|
|
return False
|
|
defined_options = {
|
|
option: value for option, value in new_dict.items() if value is not None
|
|
}
|
|
for option, value in defined_options.items():
|
|
old_value = old_dict.get(option)
|
|
if not value and not old_value:
|
|
continue
|
|
if value != old_value:
|
|
return True
|
|
return False
|
|
|
|
|
|
def has_list_changed(
|
|
new_list: list[t.Any] | None,
|
|
old_list: list[t.Any] | None,
|
|
sort_lists: bool = True,
|
|
sort_key: str | None = None,
|
|
) -> bool:
|
|
"""
|
|
Check whether two lists have differences. Lists are sorted by default.
|
|
"""
|
|
|
|
def sort_list(unsorted_list: list[t.Any]) -> list[t.Any]:
|
|
"""
|
|
Sort a given list.
|
|
The list may contain dictionaries, so use the sort key to handle them.
|
|
"""
|
|
|
|
if unsorted_list and isinstance(unsorted_list[0], dict):
|
|
if not sort_key:
|
|
raise ValueError("A sort key was not specified when sorting list")
|
|
return sorted(unsorted_list, key=lambda k: k[sort_key])
|
|
|
|
# Either the list is empty or does not contain dictionaries
|
|
try:
|
|
return sorted(unsorted_list)
|
|
except TypeError:
|
|
return unsorted_list
|
|
|
|
if new_list is None:
|
|
return False
|
|
old_list = old_list or []
|
|
if len(new_list) != len(old_list):
|
|
return True
|
|
|
|
if sort_lists:
|
|
zip_data = zip(sort_list(new_list), sort_list(old_list))
|
|
else:
|
|
zip_data = zip(new_list, old_list)
|
|
for new_item, old_item in zip_data:
|
|
is_same_type = type( # noqa: E721, pylint: disable=unidiomatic-typecheck
|
|
new_item
|
|
) == type( # noqa: E721, pylint: disable=unidiomatic-typecheck
|
|
old_item
|
|
)
|
|
if not is_same_type:
|
|
if isinstance(new_item, str) and isinstance(old_item, str):
|
|
# Even though the types are different between these items,
|
|
# they are both strings. Try matching on the same string type.
|
|
try:
|
|
new_item_type = type(new_item)
|
|
old_item_casted = new_item_type(old_item)
|
|
if new_item != old_item_casted:
|
|
return True
|
|
continue
|
|
except UnicodeEncodeError:
|
|
# Fallback to assuming the strings are different
|
|
return True
|
|
else:
|
|
return True
|
|
if isinstance(new_item, dict):
|
|
if has_dict_changed(new_item, old_item):
|
|
return True
|
|
elif new_item != old_item:
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
def have_networks_changed(
|
|
new_networks: list[dict[str, t.Any]] | None,
|
|
old_networks: list[dict[str, t.Any]] | None,
|
|
) -> bool:
|
|
"""Special case list checking for networks to sort aliases"""
|
|
|
|
if new_networks is None:
|
|
return False
|
|
old_networks = old_networks or []
|
|
if len(new_networks) != len(old_networks):
|
|
return True
|
|
|
|
zip_data = zip(
|
|
sorted(new_networks, key=lambda k: k["id"]),
|
|
sorted(old_networks, key=lambda k: k["id"]),
|
|
)
|
|
|
|
for new_item, old_item in zip_data:
|
|
new_item = dict(new_item)
|
|
old_item = dict(old_item)
|
|
# Sort the aliases
|
|
if "aliases" in new_item:
|
|
new_item["aliases"] = sorted(new_item["aliases"] or [])
|
|
if "aliases" in old_item:
|
|
old_item["aliases"] = sorted(old_item["aliases"] or [])
|
|
|
|
if has_dict_changed(new_item, old_item):
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
class DockerService(DockerBaseClass):
|
|
def __init__(
|
|
self, docker_api_version: LooseVersion, docker_py_version: LooseVersion
|
|
) -> None:
|
|
super().__init__()
|
|
self.image: str | None = ""
|
|
self.command: t.Any = None
|
|
self.args: list[str] | None = None
|
|
self.endpoint_mode: t.Literal["vip", "dnsrr"] | None = None
|
|
self.dns: list[str] | None = None
|
|
self.healthcheck: dict[str, t.Any] | None = None
|
|
self.healthcheck_disabled: bool | None = None
|
|
self.hostname: str | None = None
|
|
self.hosts: dict[str, t.Any] | None = None
|
|
self.tty: bool | None = None
|
|
self.dns_search: list[str] | None = None
|
|
self.dns_options: list[str] | None = None
|
|
self.env: t.Any = None
|
|
self.force_update: int | None = None
|
|
self.groups: list[str] | None = None
|
|
self.log_driver: str | None = None
|
|
self.log_driver_options: dict[str, t.Any] | None = None
|
|
self.labels: dict[str, t.Any] | None = None
|
|
self.container_labels: dict[str, t.Any] | None = None
|
|
self.sysctls: dict[str, t.Any] | None = None
|
|
self.limit_cpu: float | None = None
|
|
self.limit_memory: int | None = None
|
|
self.reserve_cpu: float | None = None
|
|
self.reserve_memory: int | None = None
|
|
self.mode: t.Literal["replicated", "global", "replicated-job"] = "replicated"
|
|
self.user: str | None = None
|
|
self.mounts: list[dict[str, t.Any]] | None = None
|
|
self.configs: list[dict[str, t.Any]] | None = None
|
|
self.secrets: list[dict[str, t.Any]] | None = None
|
|
self.constraints: list[str] | None = None
|
|
self.replicas_max_per_node: int | None = None
|
|
self.networks: list[t.Any] | None = None
|
|
self.stop_grace_period: int | None = None
|
|
self.stop_signal: str | None = None
|
|
self.publish: list[dict[str, t.Any]] | None = None
|
|
self.placement_preferences: list[dict[str, t.Any]] | None = None
|
|
self.replicas: int | None = -1
|
|
self.service_id = False
|
|
self.service_version = False
|
|
self.read_only: bool | None = None
|
|
self.restart_policy: t.Literal["none", "on-failure", "any"] | None = None
|
|
self.restart_policy_attempts: int | None = None
|
|
self.restart_policy_delay: str | None = None
|
|
self.restart_policy_window: str | None = None
|
|
self.rollback_config: dict[str, t.Any] | None = None
|
|
self.update_delay: str | None = None
|
|
self.update_parallelism: int | None = None
|
|
self.update_failure_action: (
|
|
t.Literal["continue", "pause", "rollback"] | None
|
|
) = None
|
|
self.update_monitor: str | None = None
|
|
self.update_max_failure_ratio: float | None = None
|
|
self.update_order: str | None = None
|
|
self.working_dir: str | None = None
|
|
self.init: bool | None = None
|
|
self.cap_add: list[str] | None = None
|
|
self.cap_drop: list[str] | None = None
|
|
|
|
self.docker_api_version = docker_api_version
|
|
self.docker_py_version = docker_py_version
|
|
|
|
def get_facts(self) -> dict[str, t.Any]:
|
|
return {
|
|
"image": self.image,
|
|
"mounts": self.mounts,
|
|
"configs": self.configs,
|
|
"networks": self.networks,
|
|
"command": self.command,
|
|
"args": self.args,
|
|
"tty": self.tty,
|
|
"dns": self.dns,
|
|
"dns_search": self.dns_search,
|
|
"dns_options": self.dns_options,
|
|
"healthcheck": self.healthcheck,
|
|
"healthcheck_disabled": self.healthcheck_disabled,
|
|
"hostname": self.hostname,
|
|
"hosts": self.hosts,
|
|
"env": self.env,
|
|
"force_update": self.force_update,
|
|
"groups": self.groups,
|
|
"log_driver": self.log_driver,
|
|
"log_driver_options": self.log_driver_options,
|
|
"publish": self.publish,
|
|
"constraints": self.constraints,
|
|
"replicas_max_per_node": self.replicas_max_per_node,
|
|
"placement_preferences": self.placement_preferences,
|
|
"labels": self.labels,
|
|
"container_labels": self.container_labels,
|
|
"sysctls": self.sysctls,
|
|
"mode": self.mode,
|
|
"replicas": self.replicas,
|
|
"endpoint_mode": self.endpoint_mode,
|
|
"restart_policy": self.restart_policy,
|
|
"secrets": self.secrets,
|
|
"stop_grace_period": self.stop_grace_period,
|
|
"stop_signal": self.stop_signal,
|
|
"limit_cpu": self.limit_cpu,
|
|
"limit_memory": self.limit_memory,
|
|
"read_only": self.read_only,
|
|
"reserve_cpu": self.reserve_cpu,
|
|
"reserve_memory": self.reserve_memory,
|
|
"restart_policy_delay": self.restart_policy_delay,
|
|
"restart_policy_attempts": self.restart_policy_attempts,
|
|
"restart_policy_window": self.restart_policy_window,
|
|
"rollback_config": self.rollback_config,
|
|
"update_delay": self.update_delay,
|
|
"update_parallelism": self.update_parallelism,
|
|
"update_failure_action": self.update_failure_action,
|
|
"update_monitor": self.update_monitor,
|
|
"update_max_failure_ratio": self.update_max_failure_ratio,
|
|
"update_order": self.update_order,
|
|
"user": self.user,
|
|
"working_dir": self.working_dir,
|
|
"init": self.init,
|
|
"cap_add": self.cap_add,
|
|
"cap_drop": self.cap_drop,
|
|
}
|
|
|
|
@property
|
|
def can_update_networks(self) -> bool:
|
|
# Before Docker API 1.29 adding/removing networks was not supported
|
|
return self.docker_api_version >= LooseVersion(
|
|
"1.29"
|
|
) and self.docker_py_version >= LooseVersion("2.7")
|
|
|
|
@property
|
|
def can_use_task_template_networks(self) -> bool:
|
|
# In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
|
|
return self.docker_py_version >= LooseVersion("2.7")
|
|
|
|
@staticmethod
|
|
def get_restart_config_from_ansible_params(
|
|
params: dict[str, t.Any],
|
|
) -> dict[str, t.Any]:
|
|
restart_config = params["restart_config"] or {}
|
|
condition = get_value(
|
|
"condition",
|
|
restart_config,
|
|
)
|
|
delay = get_value(
|
|
"delay",
|
|
restart_config,
|
|
)
|
|
delay = get_nanoseconds_from_raw_option("restart_policy_delay", delay)
|
|
max_attempts = get_value(
|
|
"max_attempts",
|
|
restart_config,
|
|
)
|
|
window = get_value(
|
|
"window",
|
|
restart_config,
|
|
)
|
|
window = get_nanoseconds_from_raw_option("restart_policy_window", window)
|
|
return {
|
|
"restart_policy": condition,
|
|
"restart_policy_delay": delay,
|
|
"restart_policy_attempts": max_attempts,
|
|
"restart_policy_window": window,
|
|
}
|
|
|
|
@staticmethod
|
|
def get_update_config_from_ansible_params(
|
|
params: dict[str, t.Any],
|
|
) -> dict[str, t.Any]:
|
|
update_config = params["update_config"] or {}
|
|
parallelism = get_value(
|
|
"parallelism",
|
|
update_config,
|
|
)
|
|
delay = get_value(
|
|
"delay",
|
|
update_config,
|
|
)
|
|
delay = get_nanoseconds_from_raw_option("update_delay", delay)
|
|
failure_action = get_value(
|
|
"failure_action",
|
|
update_config,
|
|
)
|
|
monitor = get_value(
|
|
"monitor",
|
|
update_config,
|
|
)
|
|
monitor = get_nanoseconds_from_raw_option("update_monitor", monitor)
|
|
max_failure_ratio = get_value(
|
|
"max_failure_ratio",
|
|
update_config,
|
|
)
|
|
order = get_value(
|
|
"order",
|
|
update_config,
|
|
)
|
|
return {
|
|
"update_parallelism": parallelism,
|
|
"update_delay": delay,
|
|
"update_failure_action": failure_action,
|
|
"update_monitor": monitor,
|
|
"update_max_failure_ratio": max_failure_ratio,
|
|
"update_order": order,
|
|
}
|
|
|
|
@staticmethod
|
|
def get_rollback_config_from_ansible_params(
|
|
params: dict[str, t.Any],
|
|
) -> dict[str, t.Any] | None:
|
|
if params["rollback_config"] is None:
|
|
return None
|
|
rollback_config = params["rollback_config"] or {}
|
|
delay = get_nanoseconds_from_raw_option(
|
|
"rollback_config.delay", rollback_config.get("delay")
|
|
)
|
|
monitor = get_nanoseconds_from_raw_option(
|
|
"rollback_config.monitor", rollback_config.get("monitor")
|
|
)
|
|
return {
|
|
"parallelism": rollback_config.get("parallelism"),
|
|
"delay": delay,
|
|
"failure_action": rollback_config.get("failure_action"),
|
|
"monitor": monitor,
|
|
"max_failure_ratio": rollback_config.get("max_failure_ratio"),
|
|
"order": rollback_config.get("order"),
|
|
}
|
|
|
|
@staticmethod
|
|
def get_logging_from_ansible_params(params: dict[str, t.Any]) -> dict[str, t.Any]:
|
|
logging_config = params["logging"] or {}
|
|
driver = get_value(
|
|
"driver",
|
|
logging_config,
|
|
)
|
|
options = get_value(
|
|
"options",
|
|
logging_config,
|
|
)
|
|
return {
|
|
"log_driver": driver,
|
|
"log_driver_options": options,
|
|
}
|
|
|
|
@staticmethod
|
|
def get_limits_from_ansible_params(params: dict[str, t.Any]) -> dict[str, t.Any]:
|
|
limits = params["limits"] or {}
|
|
cpus = get_value(
|
|
"cpus",
|
|
limits,
|
|
)
|
|
memory = get_value(
|
|
"memory",
|
|
limits,
|
|
)
|
|
if memory is not None:
|
|
try:
|
|
memory = human_to_bytes(memory)
|
|
except ValueError as exc:
|
|
raise ValueError(
|
|
f"Failed to convert limit_memory to bytes: {exc}"
|
|
) from exc
|
|
return {
|
|
"limit_cpu": cpus,
|
|
"limit_memory": memory,
|
|
}
|
|
|
|
@staticmethod
|
|
def get_reservations_from_ansible_params(
|
|
params: dict[str, t.Any],
|
|
) -> dict[str, t.Any]:
|
|
reservations = params["reservations"] or {}
|
|
cpus = get_value(
|
|
"cpus",
|
|
reservations,
|
|
)
|
|
memory = get_value(
|
|
"memory",
|
|
reservations,
|
|
)
|
|
|
|
if memory is not None:
|
|
try:
|
|
memory = human_to_bytes(memory)
|
|
except ValueError as exc:
|
|
raise ValueError(
|
|
f"Failed to convert reserve_memory to bytes: {exc}"
|
|
) from exc
|
|
return {
|
|
"reserve_cpu": cpus,
|
|
"reserve_memory": memory,
|
|
}
|
|
|
|
@staticmethod
|
|
def get_placement_from_ansible_params(params: dict[str, t.Any]) -> dict[str, t.Any]:
|
|
placement = params["placement"] or {}
|
|
constraints = get_value("constraints", placement)
|
|
|
|
preferences = placement.get("preferences")
|
|
replicas_max_per_node = get_value("replicas_max_per_node", placement)
|
|
|
|
return {
|
|
"constraints": constraints,
|
|
"placement_preferences": preferences,
|
|
"replicas_max_per_node": replicas_max_per_node,
|
|
}
|
|
|
|
@classmethod
|
|
def from_ansible_params(
|
|
cls,
|
|
ap: dict[str, t.Any],
|
|
old_service: DockerService | None,
|
|
image_digest: str,
|
|
secret_ids: dict[str, str],
|
|
config_ids: dict[str, str],
|
|
network_ids: dict[str, str],
|
|
client: AnsibleDockerClient,
|
|
) -> DockerService:
|
|
s = DockerService(client.docker_api_version, client.docker_py_version)
|
|
s.image = image_digest
|
|
s.args = ap["args"]
|
|
s.endpoint_mode = ap["endpoint_mode"]
|
|
s.dns = ap["dns"]
|
|
s.dns_search = ap["dns_search"]
|
|
s.dns_options = ap["dns_options"]
|
|
s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap["healthcheck"])
|
|
s.hostname = ap["hostname"]
|
|
s.hosts = ap["hosts"]
|
|
s.tty = ap["tty"]
|
|
s.labels = ap["labels"]
|
|
sanitize_labels(s.labels, "labels", client)
|
|
s.container_labels = ap["container_labels"]
|
|
sanitize_labels(s.container_labels, "container_labels", client)
|
|
s.sysctls = ap["sysctls"]
|
|
s.mode = ap["mode"]
|
|
s.stop_signal = ap["stop_signal"]
|
|
s.user = ap["user"]
|
|
s.working_dir = ap["working_dir"]
|
|
s.read_only = ap["read_only"]
|
|
s.init = ap["init"]
|
|
s.cap_add = ap["cap_add"]
|
|
s.cap_drop = ap["cap_drop"]
|
|
|
|
s.networks = get_docker_networks(ap["networks"], network_ids)
|
|
|
|
s.command = ap["command"]
|
|
if isinstance(s.command, str):
|
|
s.command = shlex.split(s.command)
|
|
elif isinstance(s.command, list):
|
|
invalid_items = [
|
|
(index, item)
|
|
for index, item in enumerate(s.command)
|
|
if not isinstance(item, str)
|
|
]
|
|
if invalid_items:
|
|
errors = ", ".join(
|
|
[
|
|
f"{item} ({type(item)}) at index {index}"
|
|
for index, item in invalid_items
|
|
]
|
|
)
|
|
raise ValueError(
|
|
"All items in a command list need to be strings. "
|
|
f"Check quoting. Invalid items: {errors}."
|
|
)
|
|
s.command = ap["command"]
|
|
elif s.command is not None:
|
|
raise ValueError(
|
|
f"Invalid type for command {s.command} ({type(s.command)}). "
|
|
"Only string or list allowed. Check quoting."
|
|
)
|
|
|
|
s.env = get_docker_environment(ap["env"], ap["env_files"])
|
|
s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
|
|
|
|
update_config = cls.get_update_config_from_ansible_params(ap)
|
|
for key, value in update_config.items():
|
|
setattr(s, key, value)
|
|
|
|
restart_config = cls.get_restart_config_from_ansible_params(ap)
|
|
for key, value in restart_config.items():
|
|
setattr(s, key, value)
|
|
|
|
logging_config = cls.get_logging_from_ansible_params(ap)
|
|
for key, value in logging_config.items():
|
|
setattr(s, key, value)
|
|
|
|
limits = cls.get_limits_from_ansible_params(ap)
|
|
for key, value in limits.items():
|
|
setattr(s, key, value)
|
|
|
|
reservations = cls.get_reservations_from_ansible_params(ap)
|
|
for key, value in reservations.items():
|
|
setattr(s, key, value)
|
|
|
|
placement = cls.get_placement_from_ansible_params(ap)
|
|
for key, value in placement.items():
|
|
setattr(s, key, value)
|
|
|
|
if ap["stop_grace_period"] is not None:
|
|
s.stop_grace_period = convert_duration_to_nanosecond(
|
|
ap["stop_grace_period"]
|
|
)
|
|
|
|
if ap["force_update"]:
|
|
s.force_update = int(str(time.time()).replace(".", ""))
|
|
|
|
if ap["groups"] is not None:
|
|
# In case integers are passed as groups, we need to convert them to
|
|
# strings as docker internally treats them as strings.
|
|
s.groups = [str(g) for g in ap["groups"]]
|
|
|
|
if ap["replicas"] == -1:
|
|
if old_service:
|
|
s.replicas = old_service.replicas
|
|
else:
|
|
s.replicas = 1
|
|
else:
|
|
s.replicas = ap["replicas"]
|
|
|
|
if ap["publish"] is not None:
|
|
s.publish = []
|
|
for param_p in ap["publish"]:
|
|
service_p = {}
|
|
service_p["protocol"] = param_p["protocol"]
|
|
service_p["mode"] = param_p["mode"]
|
|
service_p["published_port"] = param_p["published_port"]
|
|
service_p["target_port"] = param_p["target_port"]
|
|
s.publish.append(service_p)
|
|
|
|
if ap["mounts"] is not None:
|
|
s.mounts = []
|
|
for param_m in ap["mounts"]:
|
|
service_m = {}
|
|
service_m["readonly"] = param_m["readonly"]
|
|
service_m["type"] = param_m["type"]
|
|
if param_m["source"] is None and param_m["type"] != "tmpfs":
|
|
raise ValueError(
|
|
"Source must be specified for mounts which are not of type tmpfs"
|
|
)
|
|
service_m["source"] = param_m["source"] or ""
|
|
service_m["target"] = param_m["target"]
|
|
service_m["labels"] = param_m["labels"]
|
|
service_m["no_copy"] = param_m["no_copy"]
|
|
service_m["propagation"] = param_m["propagation"]
|
|
service_m["driver_config"] = param_m["driver_config"]
|
|
service_m["tmpfs_mode"] = param_m["tmpfs_mode"]
|
|
tmpfs_size = param_m["tmpfs_size"]
|
|
if tmpfs_size is not None:
|
|
try:
|
|
tmpfs_size = human_to_bytes(tmpfs_size)
|
|
except ValueError as exc:
|
|
raise ValueError(
|
|
f"Failed to convert tmpfs_size to bytes: {exc}"
|
|
) from exc
|
|
|
|
service_m["tmpfs_size"] = tmpfs_size
|
|
s.mounts.append(service_m)
|
|
|
|
if ap["configs"] is not None:
|
|
s.configs = []
|
|
for param_m in ap["configs"]:
|
|
service_c = {}
|
|
config_name = param_m["config_name"]
|
|
service_c["config_id"] = param_m["config_id"] or config_ids[config_name]
|
|
service_c["config_name"] = config_name
|
|
service_c["filename"] = param_m["filename"] or config_name
|
|
service_c["uid"] = param_m["uid"]
|
|
service_c["gid"] = param_m["gid"]
|
|
service_c["mode"] = param_m["mode"]
|
|
s.configs.append(service_c)
|
|
|
|
if ap["secrets"] is not None:
|
|
s.secrets = []
|
|
for param_m in ap["secrets"]:
|
|
service_s = {}
|
|
secret_name = param_m["secret_name"]
|
|
service_s["secret_id"] = param_m["secret_id"] or secret_ids[secret_name]
|
|
service_s["secret_name"] = secret_name
|
|
service_s["filename"] = param_m["filename"] or secret_name
|
|
service_s["uid"] = param_m["uid"]
|
|
service_s["gid"] = param_m["gid"]
|
|
service_s["mode"] = param_m["mode"]
|
|
s.secrets.append(service_s)
|
|
|
|
return s
|
|
|
|
    def compare(self, os: DockerService) -> tuple[bool, DifferenceTracker, bool, bool]:
        differences = DifferenceTracker()
        needs_rebuild = False
        force_update = False
        if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
            differences.add(
                "endpoint_mode", parameter=self.endpoint_mode, active=os.endpoint_mode
            )
        if has_list_changed(self.env, os.env):
            differences.add("env", parameter=self.env, active=os.env)
        if self.log_driver is not None and self.log_driver != os.log_driver:
            differences.add(
                "log_driver", parameter=self.log_driver, active=os.log_driver
            )
        if self.log_driver_options is not None and self.log_driver_options != (
            os.log_driver_options or {}
        ):
            differences.add(
                "log_opt",
                parameter=self.log_driver_options,
                active=os.log_driver_options,
            )
        if self.mode != os.mode:
            needs_rebuild = True
            differences.add("mode", parameter=self.mode, active=os.mode)
        if has_list_changed(self.mounts, os.mounts, sort_key="target"):
            differences.add("mounts", parameter=self.mounts, active=os.mounts)
        if has_list_changed(self.configs, os.configs, sort_key="config_name"):
            differences.add("configs", parameter=self.configs, active=os.configs)
        if has_list_changed(self.secrets, os.secrets, sort_key="secret_name"):
            differences.add("secrets", parameter=self.secrets, active=os.secrets)
        if have_networks_changed(self.networks, os.networks):
            differences.add("networks", parameter=self.networks, active=os.networks)
            needs_rebuild = not self.can_update_networks
        if self.replicas != os.replicas:
            differences.add("replicas", parameter=self.replicas, active=os.replicas)
        if has_list_changed(self.command, os.command, sort_lists=False):
            differences.add("command", parameter=self.command, active=os.command)
        if has_list_changed(self.args, os.args, sort_lists=False):
            differences.add("args", parameter=self.args, active=os.args)
        if has_list_changed(self.constraints, os.constraints):
            differences.add(
                "constraints", parameter=self.constraints, active=os.constraints
            )
        if (
            self.replicas_max_per_node is not None
            and self.replicas_max_per_node != os.replicas_max_per_node
        ):
            differences.add(
                "replicas_max_per_node",
                parameter=self.replicas_max_per_node,
                active=os.replicas_max_per_node,
            )
        if has_list_changed(
            self.placement_preferences, os.placement_preferences, sort_lists=False
        ):
            differences.add(
                "placement_preferences",
                parameter=self.placement_preferences,
                active=os.placement_preferences,
            )
        if has_list_changed(self.groups, os.groups):
            differences.add("groups", parameter=self.groups, active=os.groups)
        if self.labels is not None and self.labels != (os.labels or {}):
            differences.add("labels", parameter=self.labels, active=os.labels)
        if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
            differences.add("limit_cpu", parameter=self.limit_cpu, active=os.limit_cpu)
        if self.limit_memory is not None and self.limit_memory != os.limit_memory:
            differences.add(
                "limit_memory", parameter=self.limit_memory, active=os.limit_memory
            )
        if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
            differences.add(
                "reserve_cpu", parameter=self.reserve_cpu, active=os.reserve_cpu
            )
        if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
            differences.add(
                "reserve_memory",
                parameter=self.reserve_memory,
                active=os.reserve_memory,
            )
        if self.container_labels is not None and self.container_labels != (
            os.container_labels or {}
        ):
            differences.add(
                "container_labels",
                parameter=self.container_labels,
                active=os.container_labels,
            )
        if self.sysctls is not None and self.sysctls != (os.sysctls or {}):
            differences.add("sysctls", parameter=self.sysctls, active=os.sysctls)
        if self.stop_signal is not None and self.stop_signal != os.stop_signal:
            differences.add(
                "stop_signal", parameter=self.stop_signal, active=os.stop_signal
            )
        if (
            self.stop_grace_period is not None
            and self.stop_grace_period != os.stop_grace_period
        ):
            differences.add(
                "stop_grace_period",
                parameter=self.stop_grace_period,
                active=os.stop_grace_period,
            )
        if self.has_publish_changed(os.publish):
            differences.add("publish", parameter=self.publish, active=os.publish)
        if self.read_only is not None and self.read_only != os.read_only:
            differences.add("read_only", parameter=self.read_only, active=os.read_only)
        if self.restart_policy is not None and self.restart_policy != os.restart_policy:
            differences.add(
                "restart_policy",
                parameter=self.restart_policy,
                active=os.restart_policy,
            )
        if (
            self.restart_policy_attempts is not None
            and self.restart_policy_attempts != os.restart_policy_attempts
        ):
            differences.add(
                "restart_policy_attempts",
                parameter=self.restart_policy_attempts,
                active=os.restart_policy_attempts,
            )
        if (
            self.restart_policy_delay is not None
            and self.restart_policy_delay != os.restart_policy_delay
        ):
            differences.add(
                "restart_policy_delay",
                parameter=self.restart_policy_delay,
                active=os.restart_policy_delay,
            )
        if (
            self.restart_policy_window is not None
            and self.restart_policy_window != os.restart_policy_window
        ):
            differences.add(
                "restart_policy_window",
                parameter=self.restart_policy_window,
                active=os.restart_policy_window,
            )
        if has_dict_changed(self.rollback_config, os.rollback_config):
            differences.add(
                "rollback_config",
                parameter=self.rollback_config,
                active=os.rollback_config,
            )
        if self.update_delay is not None and self.update_delay != os.update_delay:
            differences.add(
                "update_delay", parameter=self.update_delay, active=os.update_delay
            )
        if (
            self.update_parallelism is not None
            and self.update_parallelism != os.update_parallelism
        ):
            differences.add(
                "update_parallelism",
                parameter=self.update_parallelism,
                active=os.update_parallelism,
            )
        if (
            self.update_failure_action is not None
            and self.update_failure_action != os.update_failure_action
        ):
            differences.add(
                "update_failure_action",
                parameter=self.update_failure_action,
                active=os.update_failure_action,
            )
        if self.update_monitor is not None and self.update_monitor != os.update_monitor:
            differences.add(
                "update_monitor",
                parameter=self.update_monitor,
                active=os.update_monitor,
            )
        if (
            self.update_max_failure_ratio is not None
            and self.update_max_failure_ratio != os.update_max_failure_ratio
        ):
            differences.add(
                "update_max_failure_ratio",
                parameter=self.update_max_failure_ratio,
                active=os.update_max_failure_ratio,
            )
        if self.update_order is not None and self.update_order != os.update_order:
            differences.add(
                "update_order", parameter=self.update_order, active=os.update_order
            )
        has_image_changed, change = self.has_image_changed(os.image or "")
        if has_image_changed:
            differences.add("image", parameter=self.image, active=change)
        if self.user and self.user != os.user:
            differences.add("user", parameter=self.user, active=os.user)
        if has_list_changed(self.dns, os.dns, sort_lists=False):
            differences.add("dns", parameter=self.dns, active=os.dns)
        if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
            differences.add(
                "dns_search", parameter=self.dns_search, active=os.dns_search
            )
        if has_list_changed(self.dns_options, os.dns_options):
            differences.add(
                "dns_options", parameter=self.dns_options, active=os.dns_options
            )
        if self.has_healthcheck_changed(os):
            differences.add(
                "healthcheck", parameter=self.healthcheck, active=os.healthcheck
            )
        if self.hostname is not None and self.hostname != os.hostname:
            differences.add("hostname", parameter=self.hostname, active=os.hostname)
        if self.hosts is not None and self.hosts != (os.hosts or {}):
            differences.add("hosts", parameter=self.hosts, active=os.hosts)
        if self.tty is not None and self.tty != os.tty:
            differences.add("tty", parameter=self.tty, active=os.tty)
        if self.working_dir is not None and self.working_dir != os.working_dir:
            differences.add(
                "working_dir", parameter=self.working_dir, active=os.working_dir
            )
        if self.force_update:
            force_update = True
        if self.init is not None and self.init != os.init:
            differences.add("init", parameter=self.init, active=os.init)
        if has_list_changed(self.cap_add, os.cap_add):
            differences.add("cap_add", parameter=self.cap_add, active=os.cap_add)
        if has_list_changed(self.cap_drop, os.cap_drop):
            differences.add("cap_drop", parameter=self.cap_drop, active=os.cap_drop)
        return (
            not differences.empty or force_update,
            differences,
            needs_rebuild,
            force_update,
        )

    def has_healthcheck_changed(self, old_publish: DockerService) -> bool:
        if self.healthcheck_disabled is False and self.healthcheck is None:
            return False
        if self.healthcheck_disabled:
            if old_publish.healthcheck is None:
                return False
            if old_publish.healthcheck.get("test") == ["NONE"]:
                return False
        return self.healthcheck != old_publish.healthcheck
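    # Published ports are compared as unordered sets of port definitions: both
    # lists are sorted by (published_port, target_port, protocol) before being
    # zipped together. When the desired definition leaves "mode" unset, that key is
    # ignored on both sides so the daemon's default publish mode does not show up
    # as a spurious difference.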
    def has_publish_changed(self, old_publish: list[dict[str, t.Any]] | None) -> bool:
        if self.publish is None:
            return False
        old_publish = old_publish or []
        if len(self.publish) != len(old_publish):
            return True

        def publish_sorter(item: dict[str, t.Any]) -> tuple[int, int, str]:
            return (
                item.get("published_port") or 0,
                item.get("target_port") or 0,
                item.get("protocol") or "",
            )

        publish = sorted(self.publish, key=publish_sorter)
        old_publish = sorted(old_publish, key=publish_sorter)
        for publish_item, old_publish_item in zip(publish, old_publish):
            ignored_keys = set()
            if not publish_item.get("mode"):
                ignored_keys.add("mode")
            # Create copies of publish_item dicts where keys specified in ignored_keys are left out
            filtered_old_publish_item = {
                k: v for k, v in old_publish_item.items() if k not in ignored_keys
            }
            filtered_publish_item = {
                k: v for k, v in publish_item.items() if k not in ignored_keys
            }
            if filtered_publish_item != filtered_old_publish_item:
                return True
        return False
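    # When the desired image carries no explicit "@digest", the digest suffix of
    # the currently running image is stripped before comparing, so a service that
    # was resolved to "repo:tag@sha256:..." is not reported as changed for a plain
    # "repo:tag" parameter.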
    def has_image_changed(self, old_image: str) -> tuple[bool, str]:
        assert self.image is not None
        if "@" not in self.image:
            old_image = old_image.split("@")[0]
        return self.image != old_image, old_image
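    # The build_* helpers below translate the parsed module parameters into the
    # docker.types.* objects expected by Docker SDK for Python. Only options the
    # user actually set are passed through, so the daemon's defaults apply to
    # everything left at None.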
    def build_container_spec(self) -> types.ContainerSpec:
        mounts = None
        if self.mounts is not None:
            mounts = []
            for mount_config in self.mounts:
                mount_options = {
                    "target": "target",
                    "source": "source",
                    "type": "type",
                    "readonly": "read_only",
                    "propagation": "propagation",
                    "labels": "labels",
                    "no_copy": "no_copy",
                    "driver_config": "driver_config",
                    "tmpfs_size": "tmpfs_size",
                    "tmpfs_mode": "tmpfs_mode",
                }
                mount_args = {}
                for option, mount_arg in mount_options.items():
                    value = mount_config.get(option)
                    if value is not None:
                        mount_args[mount_arg] = value

                mounts.append(types.Mount(**mount_args))

        configs = None
        if self.configs is not None:
            configs = []
            for config_config in self.configs:
                config_args = {
                    "config_id": config_config["config_id"],
                    "config_name": config_config["config_name"],
                }
                filename = config_config.get("filename")
                if filename:
                    config_args["filename"] = filename
                uid = config_config.get("uid")
                if uid:
                    config_args["uid"] = uid
                gid = config_config.get("gid")
                if gid:
                    config_args["gid"] = gid
                mode = config_config.get("mode")
                if mode:
                    config_args["mode"] = mode

                configs.append(types.ConfigReference(**config_args))

        secrets = None
        if self.secrets is not None:
            secrets = []
            for secret_config in self.secrets:
                secret_args = {
                    "secret_id": secret_config["secret_id"],
                    "secret_name": secret_config["secret_name"],
                }
                filename = secret_config.get("filename")
                if filename:
                    secret_args["filename"] = filename
                uid = secret_config.get("uid")
                if uid:
                    secret_args["uid"] = uid
                gid = secret_config.get("gid")
                if gid:
                    secret_args["gid"] = gid
                mode = secret_config.get("mode")
                if mode:
                    secret_args["mode"] = mode

                secrets.append(types.SecretReference(**secret_args))

        dns_config_args: dict[str, t.Any] = {}
        if self.dns is not None:
            dns_config_args["nameservers"] = self.dns
        if self.dns_search is not None:
            dns_config_args["search"] = self.dns_search
        if self.dns_options is not None:
            dns_config_args["options"] = self.dns_options
        dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None

        container_spec_args: dict[str, t.Any] = {}
        if self.command is not None:
            container_spec_args["command"] = self.command
        if self.args is not None:
            container_spec_args["args"] = self.args
        if self.env is not None:
            container_spec_args["env"] = self.env
        if self.user is not None:
            container_spec_args["user"] = self.user
        if self.container_labels is not None:
            container_spec_args["labels"] = self.container_labels
        if self.sysctls is not None:
            container_spec_args["sysctls"] = self.sysctls
        if self.healthcheck is not None:
            container_spec_args["healthcheck"] = types.Healthcheck(**self.healthcheck)
        elif self.healthcheck_disabled:
            container_spec_args["healthcheck"] = types.Healthcheck(test=["NONE"])
        if self.hostname is not None:
            container_spec_args["hostname"] = self.hostname
        if self.hosts is not None:
            container_spec_args["hosts"] = self.hosts
        if self.read_only is not None:
            container_spec_args["read_only"] = self.read_only
        if self.stop_grace_period is not None:
            container_spec_args["stop_grace_period"] = self.stop_grace_period
        if self.stop_signal is not None:
            container_spec_args["stop_signal"] = self.stop_signal
        if self.tty is not None:
            container_spec_args["tty"] = self.tty
        if self.groups is not None:
            container_spec_args["groups"] = self.groups
        if self.working_dir is not None:
            container_spec_args["workdir"] = self.working_dir
        if secrets is not None:
            container_spec_args["secrets"] = secrets
        if mounts is not None:
            container_spec_args["mounts"] = mounts
        if dns_config is not None:
            container_spec_args["dns_config"] = dns_config
        if configs is not None:
            container_spec_args["configs"] = configs
        if self.init is not None:
            container_spec_args["init"] = self.init
        if self.cap_add is not None:
            container_spec_args["cap_add"] = self.cap_add
        if self.cap_drop is not None:
            container_spec_args["cap_drop"] = self.cap_drop

        return types.ContainerSpec(self.image, **container_spec_args)
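    # Placement preferences arrive as a list of single-key dicts and are converted
    # to the shape the SDK expects; for example (illustrative values)
    # {"spread": "node.labels.zone"} becomes
    # {"Spread": {"SpreadDescriptor": "node.labels.zone"}}.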
    def build_placement(self) -> types.Placement | None:
        placement_args: dict[str, t.Any] = {}
        if self.constraints is not None:
            placement_args["constraints"] = self.constraints
        if self.replicas_max_per_node is not None:
            placement_args["maxreplicas"] = self.replicas_max_per_node
        if self.placement_preferences is not None:
            placement_args["preferences"] = [
                {key.title(): {"SpreadDescriptor": value}}
                for preference in self.placement_preferences
                for key, value in preference.items()
            ]
        return types.Placement(**placement_args) if placement_args else None

    def build_update_config(self) -> types.UpdateConfig | None:
        update_config_args: dict[str, t.Any] = {}
        if self.update_parallelism is not None:
            update_config_args["parallelism"] = self.update_parallelism
        if self.update_delay is not None:
            update_config_args["delay"] = self.update_delay
        if self.update_failure_action is not None:
            update_config_args["failure_action"] = self.update_failure_action
        if self.update_monitor is not None:
            update_config_args["monitor"] = self.update_monitor
        if self.update_max_failure_ratio is not None:
            update_config_args["max_failure_ratio"] = self.update_max_failure_ratio
        if self.update_order is not None:
            update_config_args["order"] = self.update_order
        return types.UpdateConfig(**update_config_args) if update_config_args else None

    def build_log_driver(self) -> types.DriverConfig | None:
        log_driver_args: dict[str, t.Any] = {}
        if self.log_driver is not None:
            log_driver_args["name"] = self.log_driver
        if self.log_driver_options is not None:
            log_driver_args["options"] = self.log_driver_options
        return types.DriverConfig(**log_driver_args) if log_driver_args else None

    def build_restart_policy(self) -> types.RestartPolicy | None:
        restart_policy_args: dict[str, t.Any] = {}
        if self.restart_policy is not None:
            restart_policy_args["condition"] = self.restart_policy
        if self.restart_policy_delay is not None:
            restart_policy_args["delay"] = self.restart_policy_delay
        if self.restart_policy_attempts is not None:
            restart_policy_args["max_attempts"] = self.restart_policy_attempts
        if self.restart_policy_window is not None:
            restart_policy_args["window"] = self.restart_policy_window
        return (
            types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
        )

    def build_rollback_config(self) -> types.RollbackConfig | None:
        if self.rollback_config is None:
            return None
        rollback_config_options = [
            "parallelism",
            "delay",
            "failure_action",
            "monitor",
            "max_failure_ratio",
            "order",
        ]
        rollback_config_args = {}
        for option in rollback_config_options:
            value = self.rollback_config.get(option)
            if value is not None:
                rollback_config_args[option] = value
        return (
            types.RollbackConfig(**rollback_config_args)
            if rollback_config_args
            else None
        )
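    # The Docker API expresses CPU limits and reservations in NanoCPUs, so the
    # float number of CPUs from the module parameters is multiplied by 1e9 here
    # (and divided by 1e9 again when a service is read back in get_service()).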
    def build_resources(self) -> types.Resources | None:
        resources_args: dict[str, t.Any] = {}
        if self.limit_cpu is not None:
            resources_args["cpu_limit"] = int(self.limit_cpu * 1000000000.0)
        if self.limit_memory is not None:
            resources_args["mem_limit"] = self.limit_memory
        if self.reserve_cpu is not None:
            resources_args["cpu_reservation"] = int(self.reserve_cpu * 1000000000.0)
        if self.reserve_memory is not None:
            resources_args["mem_reservation"] = self.reserve_memory
        return types.Resources(**resources_args) if resources_args else None

    def build_task_template(
        self,
        container_spec: types.ContainerSpec,
        placement: types.Placement | None = None,
    ) -> types.TaskTemplate:
        log_driver = self.build_log_driver()
        restart_policy = self.build_restart_policy()
        resources = self.build_resources()

        task_template_args: dict[str, t.Any] = {}
        if placement is not None:
            task_template_args["placement"] = placement
        if log_driver is not None:
            task_template_args["log_driver"] = log_driver
        if restart_policy is not None:
            task_template_args["restart_policy"] = restart_policy
        if resources is not None:
            task_template_args["resources"] = resources
        if self.force_update:
            task_template_args["force_update"] = self.force_update
        if self.can_use_task_template_networks:
            networks = self.build_networks()
            if networks:
                task_template_args["networks"] = networks
        return types.TaskTemplate(container_spec=container_spec, **task_template_args)
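    # A global service runs one task per node and cannot carry a replica count, so
    # the replicas value is cleared before the ServiceMode object is built.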
    def build_service_mode(self) -> types.ServiceMode:
        if self.mode == "global":
            self.replicas = None
        return types.ServiceMode(self.mode, replicas=self.replicas)

    def build_networks(self) -> list[dict[str, t.Any]] | None:
        networks = None
        if self.networks is not None:
            networks = []
            for network in self.networks:
                docker_network = {"Target": network["id"]}
                if "aliases" in network:
                    docker_network["Aliases"] = network["aliases"]
                if "options" in network:
                    docker_network["DriverOpts"] = network["options"]
                networks.append(docker_network)
        return networks

    def build_endpoint_spec(self) -> types.EndpointSpec | None:
        endpoint_spec_args: dict[str, t.Any] = {}
        if self.publish is not None:
            ports = []
            for port in self.publish:
                port_spec = {
                    "Protocol": port["protocol"],
                    "TargetPort": port["target_port"],
                }
                if port.get("published_port"):
                    port_spec["PublishedPort"] = port["published_port"]
                if port.get("mode"):
                    port_spec["PublishMode"] = port["mode"]
                ports.append(port_spec)
            endpoint_spec_args["ports"] = ports
        if self.endpoint_mode is not None:
            endpoint_spec_args["mode"] = self.endpoint_mode
        return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None

    def build_docker_service(self) -> dict[str, t.Any]:
        container_spec = self.build_container_spec()
        placement = self.build_placement()
        task_template = self.build_task_template(container_spec, placement)

        update_config = self.build_update_config()
        rollback_config = self.build_rollback_config()
        service_mode = self.build_service_mode()
        endpoint_spec = self.build_endpoint_spec()

        service: dict[str, t.Any] = {
            "task_template": task_template,
            "mode": service_mode,
        }
        if update_config:
            service["update_config"] = update_config
        if rollback_config:
            service["rollback_config"] = rollback_config
        if endpoint_spec:
            service["endpoint_spec"] = endpoint_spec
        if self.labels:
            service["labels"] = self.labels
        if not self.can_use_task_template_networks:
            networks = self.build_networks()
            if networks:
                service["networks"] = networks
        return service
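# DockerServiceManager wraps the Docker API calls: it reads an existing service
# back into a DockerService (get_service), creates/updates/removes services, and
# resolves names to IDs for secrets, configs and networks. run() drives the
# present/absent logic and run_safe() retries when the service version changes
# between inspect and update.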
class DockerServiceManager:
    def __init__(self, client: AnsibleDockerClient):
        self.client = client
        self.retries = 2
        self.diff_tracker: DifferenceTracker | None = None

    def get_service(self, name: str) -> DockerService | None:
        try:
            raw_data = self.client.inspect_service(name)
        except NotFound:
            return None
        ds = DockerService(
            self.client.docker_api_version, self.client.docker_py_version
        )

        task_template_data = raw_data["Spec"]["TaskTemplate"]
        ds.image = task_template_data["ContainerSpec"]["Image"]
        ds.user = task_template_data["ContainerSpec"].get("User")
        ds.env = task_template_data["ContainerSpec"].get("Env")
        ds.command = task_template_data["ContainerSpec"].get("Command")
        ds.args = task_template_data["ContainerSpec"].get("Args")
        ds.groups = task_template_data["ContainerSpec"].get("Groups")
        ds.stop_grace_period = task_template_data["ContainerSpec"].get(
            "StopGracePeriod"
        )
        ds.stop_signal = task_template_data["ContainerSpec"].get("StopSignal")
        ds.working_dir = task_template_data["ContainerSpec"].get("Dir")
        ds.read_only = task_template_data["ContainerSpec"].get("ReadOnly")
        ds.cap_add = task_template_data["ContainerSpec"].get("CapabilityAdd")
        ds.cap_drop = task_template_data["ContainerSpec"].get("CapabilityDrop")
        ds.sysctls = task_template_data["ContainerSpec"].get("Sysctls")

        healthcheck_data = task_template_data["ContainerSpec"].get("Healthcheck")
        if healthcheck_data:
            options = {
                "Test": "test",
                "Interval": "interval",
                "Timeout": "timeout",
                "StartPeriod": "start_period",
                "Retries": "retries",
            }
            healthcheck = {
                options[key]: value
                for key, value in healthcheck_data.items()
                if value is not None and key in options
            }
            ds.healthcheck = healthcheck

        update_config_data = raw_data["Spec"].get("UpdateConfig")
        if update_config_data:
            ds.update_delay = update_config_data.get("Delay")
            ds.update_parallelism = update_config_data.get("Parallelism")
            ds.update_failure_action = update_config_data.get("FailureAction")
            ds.update_monitor = update_config_data.get("Monitor")
            ds.update_max_failure_ratio = update_config_data.get("MaxFailureRatio")
            ds.update_order = update_config_data.get("Order")

        rollback_config_data = raw_data["Spec"].get("RollbackConfig")
        if rollback_config_data:
            ds.rollback_config = {
                "parallelism": rollback_config_data.get("Parallelism"),
                "delay": rollback_config_data.get("Delay"),
                "failure_action": rollback_config_data.get("FailureAction"),
                "monitor": rollback_config_data.get("Monitor"),
                "max_failure_ratio": rollback_config_data.get("MaxFailureRatio"),
                "order": rollback_config_data.get("Order"),
            }

        dns_config = task_template_data["ContainerSpec"].get("DNSConfig")
        if dns_config:
            ds.dns = dns_config.get("Nameservers")
            ds.dns_search = dns_config.get("Search")
            ds.dns_options = dns_config.get("Options")

        ds.hostname = task_template_data["ContainerSpec"].get("Hostname")

        hosts = task_template_data["ContainerSpec"].get("Hosts")
        if hosts:
            hosts = [
                (
                    list(reversed(host.split(":", 1)))
                    if ":" in host
                    else host.split(" ", 1)
                )
                for host in hosts
            ]
            ds.hosts = {hostname: ip for ip, hostname in hosts}
        ds.tty = task_template_data["ContainerSpec"].get("TTY")

        placement = task_template_data.get("Placement")
        if placement:
            ds.constraints = placement.get("Constraints")
            ds.replicas_max_per_node = placement.get("MaxReplicas")
            placement_preferences = []
            for preference in placement.get("Preferences", []):
                placement_preferences.append(
                    {
                        key.lower(): value["SpreadDescriptor"]
                        for key, value in preference.items()
                    }
                )
            ds.placement_preferences = placement_preferences or None

        restart_policy_data = task_template_data.get("RestartPolicy")
        if restart_policy_data:
            ds.restart_policy = restart_policy_data.get("Condition")
            ds.restart_policy_delay = restart_policy_data.get("Delay")
            ds.restart_policy_attempts = restart_policy_data.get("MaxAttempts")
            ds.restart_policy_window = restart_policy_data.get("Window")

        raw_data_endpoint_spec = raw_data["Spec"].get("EndpointSpec")
        if raw_data_endpoint_spec:
            ds.endpoint_mode = raw_data_endpoint_spec.get("Mode")
            raw_data_ports = raw_data_endpoint_spec.get("Ports")
            if raw_data_ports:
                ds.publish = []
                for port in raw_data_ports:
                    ds.publish.append(
                        {
                            "protocol": port["Protocol"],
                            "mode": port.get("PublishMode", None),
                            "published_port": port.get("PublishedPort", None),
                            "target_port": int(port["TargetPort"]),
                        }
                    )

        raw_data_limits = task_template_data.get("Resources", {}).get("Limits")
        if raw_data_limits:
            raw_cpu_limits = raw_data_limits.get("NanoCPUs")
            if raw_cpu_limits:
                ds.limit_cpu = float(raw_cpu_limits) / 1000000000

            raw_memory_limits = raw_data_limits.get("MemoryBytes")
            if raw_memory_limits:
                ds.limit_memory = int(raw_memory_limits)

        raw_data_reservations = task_template_data.get("Resources", {}).get(
            "Reservations"
        )
        if raw_data_reservations:
            raw_cpu_reservations = raw_data_reservations.get("NanoCPUs")
            if raw_cpu_reservations:
                ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000

            raw_memory_reservations = raw_data_reservations.get("MemoryBytes")
            if raw_memory_reservations:
                ds.reserve_memory = int(raw_memory_reservations)

        ds.labels = raw_data["Spec"].get("Labels")
        ds.log_driver = task_template_data.get("LogDriver", {}).get("Name")
        ds.log_driver_options = task_template_data.get("LogDriver", {}).get("Options")
        ds.container_labels = task_template_data["ContainerSpec"].get("Labels")

        mode = raw_data["Spec"]["Mode"]
        if "Replicated" in mode:
            ds.mode = to_text("replicated", encoding="utf-8")  # type: ignore
            ds.replicas = mode["Replicated"]["Replicas"]
        elif "Global" in mode:
            ds.mode = "global"
        elif "ReplicatedJob" in mode:
            ds.mode = to_text("replicated-job", encoding="utf-8")  # type: ignore
            ds.replicas = mode["ReplicatedJob"]["TotalCompletions"]
        else:
            raise ValueError(f"Unknown service mode: {mode}")

        raw_data_mounts = task_template_data["ContainerSpec"].get("Mounts")
        if raw_data_mounts:
            ds.mounts = []
            for mount_data in raw_data_mounts:
                bind_options = mount_data.get("BindOptions", {})
                volume_options = mount_data.get("VolumeOptions", {})
                tmpfs_options = mount_data.get("TmpfsOptions", {})
                driver_config = volume_options.get("DriverConfig", {})
                driver_config = {
                    key.lower(): value for key, value in driver_config.items()
                } or None
                ds.mounts.append(
                    {
                        "source": mount_data.get("Source", ""),
                        "type": mount_data["Type"],
                        "target": mount_data["Target"],
                        "readonly": mount_data.get("ReadOnly"),
                        "propagation": bind_options.get("Propagation"),
                        "no_copy": volume_options.get("NoCopy"),
                        "labels": volume_options.get("Labels"),
                        "driver_config": driver_config,
                        "tmpfs_mode": tmpfs_options.get("Mode"),
                        "tmpfs_size": tmpfs_options.get("SizeBytes"),
                    }
                )

        raw_data_configs = task_template_data["ContainerSpec"].get("Configs")
        if raw_data_configs:
            ds.configs = []
            for config_data in raw_data_configs:
                ds.configs.append(
                    {
                        "config_id": config_data["ConfigID"],
                        "config_name": config_data["ConfigName"],
                        "filename": config_data["File"].get("Name"),
                        "uid": config_data["File"].get("UID"),
                        "gid": config_data["File"].get("GID"),
                        "mode": config_data["File"].get("Mode"),
                    }
                )

        raw_data_secrets = task_template_data["ContainerSpec"].get("Secrets")
        if raw_data_secrets:
            ds.secrets = []
            for secret_data in raw_data_secrets:
                ds.secrets.append(
                    {
                        "secret_id": secret_data["SecretID"],
                        "secret_name": secret_data["SecretName"],
                        "filename": secret_data["File"].get("Name"),
                        "uid": secret_data["File"].get("UID"),
                        "gid": secret_data["File"].get("GID"),
                        "mode": secret_data["File"].get("Mode"),
                    }
                )

        raw_networks_data = task_template_data.get(
            "Networks", raw_data["Spec"].get("Networks")
        )
        if raw_networks_data:
            ds.networks = []
            for network_data in raw_networks_data:
                network = {"id": network_data["Target"]}
                if "Aliases" in network_data:
                    network["aliases"] = network_data["Aliases"]
                if "DriverOpts" in network_data:
                    network["options"] = network_data["DriverOpts"]
                ds.networks.append(network)
        ds.service_version = raw_data["Version"]["Index"]
        ds.service_id = raw_data["ID"]

        ds.init = task_template_data["ContainerSpec"].get("Init", False)
        return ds

    def update_service(
        self, name: str, old_service: DockerService, new_service: DockerService
    ) -> None:
        service_data = new_service.build_docker_service()
        result = self.client.update_service(
            old_service.service_id,
            old_service.service_version,
            name=name,
            **service_data,
        )
        # Prior to Docker SDK 4.0.0 no warnings were returned and will thus be ignored.
        # (see https://github.com/docker/docker-py/pull/2272)
        self.client.report_warnings(result, ["Warning"])

    def create_service(self, name: str, service: DockerService) -> None:
        service_data = service.build_docker_service()
        result = self.client.create_service(name=name, **service_data)
        self.client.report_warnings(result, ["Warning"])

    def remove_service(self, name: str) -> None:
        self.client.remove_service(name)
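    # With resolve_image enabled, the image reference is pinned to its current
    # manifest digest ("repo:tag@sha256:...") via the registry's distribution
    # endpoint, so every node deploys exactly the same image.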
    def get_image_digest(self, name: str, resolve: bool = False) -> str:
        if not name or not resolve:
            return name
        repo, tag = parse_repository_tag(name)
        if not tag:
            tag = "latest"
        name = repo + ":" + tag
        distribution_data = self.client.inspect_distribution(name)
        digest = distribution_data["Descriptor"]["digest"]
        return f"{name}@{digest}"

    def get_networks_names_ids(self) -> dict[str, str]:
        return {network["Name"]: network["Id"] for network in self.client.networks()}

    def get_missing_secret_ids(self) -> dict[str, str]:
        """
        Resolve missing secret ids by looking them up by name
        """
        secret_names = [
            secret["secret_name"]
            for secret in self.client.module.params.get("secrets") or []
            if secret["secret_id"] is None
        ]
        if not secret_names:
            return {}
        secrets = self.client.secrets(filters={"name": secret_names})
        secrets = {
            secret["Spec"]["Name"]: secret["ID"]
            for secret in secrets
            if secret["Spec"]["Name"] in secret_names
        }
        for secret_name in secret_names:
            if secret_name not in secrets:
                self.client.fail(f'Could not find a secret named "{secret_name}"')
        return secrets

    def get_missing_config_ids(self) -> dict[str, str]:
        """
        Resolve missing config ids by looking them up by name
        """
        config_names = [
            config["config_name"]
            for config in self.client.module.params.get("configs") or []
            if config["config_id"] is None
        ]
        if not config_names:
            return {}
        configs = self.client.configs(filters={"name": config_names})
        configs = {
            config["Spec"]["Name"]: config["ID"]
            for config in configs
            if config["Spec"]["Name"] in config_names
        }
        for config_name in config_names:
            if config_name not in configs:
                self.client.fail(f'Could not find a config named "{config_name}"')
        return configs
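    # run() implements the module's main decision flow: remove the service when
    # state=absent, create it when it does not exist, and otherwise compare the
    # desired and current definitions; depending on the comparison the service is
    # left alone, updated in place, forcefully updated, or removed and re-created
    # (rebuilt).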
    def run(self) -> tuple[str, bool, bool, list[str], dict[str, t.Any]]:
        self.diff_tracker = DifferenceTracker()
        module = self.client.module

        image = module.params["image"]
        try:
            image_digest = self.get_image_digest(
                name=image, resolve=module.params["resolve_image"]
            )
        except DockerException as e:
            self.client.fail(f"Error looking for an image named {image}: {e}")

        try:
            current_service = self.get_service(module.params["name"])
        except Exception as e:  # pylint: disable=broad-exception-caught
            self.client.fail(
                f"Error looking for service named {module.params['name']}: {e}"
            )
        try:
            secret_ids = self.get_missing_secret_ids()
            config_ids = self.get_missing_config_ids()
            network_ids = self.get_networks_names_ids()
            new_service = DockerService.from_ansible_params(
                module.params,
                current_service,
                image_digest,
                secret_ids,
                config_ids,
                network_ids,
                self.client,
            )
        except Exception as e:  # pylint: disable=broad-exception-caught
            return self.client.fail(f"Error parsing module parameters: {e}")

        changed = False
        msg = "noop"
        rebuilt = False
        differences = DifferenceTracker()
        facts = {}

        if current_service:
            if module.params["state"] == "absent":
                if not module.check_mode:
                    self.remove_service(module.params["name"])
                msg = "Service removed"
                changed = True
            else:
                changed, differences, need_rebuild, force_update = new_service.compare(
                    current_service
                )
                if changed:
                    self.diff_tracker.merge(differences)
                    if need_rebuild:
                        if not module.check_mode:
                            self.remove_service(module.params["name"])
                            self.create_service(module.params["name"], new_service)
                        msg = "Service rebuilt"
                        rebuilt = True
                    else:
                        if not module.check_mode:
                            self.update_service(
                                module.params["name"], current_service, new_service
                            )
                        msg = "Service updated"
                        rebuilt = False
                else:
                    if force_update:
                        if not module.check_mode:
                            self.update_service(
                                module.params["name"], current_service, new_service
                            )
                        msg = "Service forcefully updated"
                        rebuilt = False
                        changed = True
                    else:
                        msg = "Service unchanged"
                facts = new_service.get_facts()
        else:
            if module.params["state"] == "absent":
                msg = "Service absent"
            else:
                if not module.check_mode:
                    self.create_service(module.params["name"], new_service)
                msg = "Service created"
                changed = True
                facts = new_service.get_facts()

        return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts

    def run_safe(self) -> tuple[str, bool, bool, list[str], dict[str, t.Any]]:
        while True:
            try:
                return self.run()
            except APIError as e:
                # Sometimes Version.Index will have changed between an inspect and
                # update. If this is encountered we'll retry the update.
                if self.retries > 0 and "update out of sequence" in str(e.explanation):
                    self.retries -= 1
                    time.sleep(1)
                else:
                    raise
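# The _detect_* helpers below are used as "detect_usage" callbacks in
# option_minimal_versions: they report whether the playbook actually uses a given
# feature, so the minimum Docker SDK / API version is only enforced when that
# feature is needed.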
def _detect_publish_mode_usage(client: AnsibleDockerClient) -> bool:
    return any(
        publish_def.get("mode") for publish_def in client.module.params["publish"] or []
    )


def _detect_healthcheck_start_period(client: AnsibleDockerClient) -> bool:
    if client.module.params["healthcheck"]:
        return client.module.params["healthcheck"]["start_period"] is not None
    return False


def _detect_mount_tmpfs_usage(client: AnsibleDockerClient) -> bool:
    for mount in client.module.params["mounts"] or []:
        if mount.get("type") == "tmpfs":
            return True
        if mount.get("tmpfs_size") is not None:
            return True
        if mount.get("tmpfs_mode") is not None:
            return True
    return False


def _detect_update_config_failure_action_rollback(client: AnsibleDockerClient) -> bool:
    rollback_config_failure_action = (client.module.params["update_config"] or {}).get(
        "failure_action"
    )
    return rollback_config_failure_action == "rollback"


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True},
        "image": {"type": "str"},
        "state": {
            "type": "str",
            "default": "present",
            "choices": ["present", "absent"],
        },
        "mounts": {
            "type": "list",
            "elements": "dict",
            "options": {
                "source": {"type": "str"},
                "target": {"type": "str", "required": True},
                "type": {
                    "type": "str",
                    "default": "bind",
                    "choices": ["bind", "volume", "tmpfs", "npipe"],
                },
                "readonly": {"type": "bool"},
                "labels": {"type": "dict"},
                "propagation": {
                    "type": "str",
                    "choices": [
                        "shared",
                        "slave",
                        "private",
                        "rshared",
                        "rslave",
                        "rprivate",
                    ],
                },
                "no_copy": {"type": "bool"},
                "driver_config": {
                    "type": "dict",
                    "options": {"name": {"type": "str"}, "options": {"type": "dict"}},
                },
                "tmpfs_size": {"type": "str"},
                "tmpfs_mode": {"type": "int"},
            },
        },
        "configs": {
            "type": "list",
            "elements": "dict",
            "options": {
                "config_id": {"type": "str"},
                "config_name": {"type": "str", "required": True},
                "filename": {"type": "str"},
                "uid": {"type": "str"},
                "gid": {"type": "str"},
                "mode": {"type": "int"},
            },
        },
        "secrets": {
            "type": "list",
            "elements": "dict",
            "no_log": False,
            "options": {
                "secret_id": {"type": "str", "no_log": False},
                "secret_name": {"type": "str", "required": True, "no_log": False},
                "filename": {"type": "str"},
                "uid": {"type": "str"},
                "gid": {"type": "str"},
                "mode": {"type": "int"},
            },
        },
        "networks": {"type": "list", "elements": "raw"},
        "command": {"type": "raw"},
        "args": {"type": "list", "elements": "str"},
        "env": {"type": "raw"},
        "env_files": {"type": "list", "elements": "path"},
        "force_update": {"type": "bool", "default": False},
        "groups": {"type": "list", "elements": "str"},
        "logging": {
            "type": "dict",
            "options": {
                "driver": {"type": "str"},
                "options": {"type": "dict"},
            },
        },
        "publish": {
            "type": "list",
            "elements": "dict",
            "options": {
                "published_port": {"type": "int", "required": False},
                "target_port": {"type": "int", "required": True},
                "protocol": {
                    "type": "str",
                    "default": "tcp",
                    "choices": ["tcp", "udp"],
                },
                "mode": {"type": "str", "choices": ["ingress", "host"]},
            },
        },
        "placement": {
            "type": "dict",
            "options": {
                "constraints": {"type": "list", "elements": "str"},
                "preferences": {"type": "list", "elements": "dict"},
                "replicas_max_per_node": {"type": "int"},
            },
        },
        "tty": {"type": "bool"},
        "dns": {"type": "list", "elements": "str"},
        "dns_search": {"type": "list", "elements": "str"},
        "dns_options": {"type": "list", "elements": "str"},
        "healthcheck": {
            "type": "dict",
            "options": {
                "test": {"type": "raw"},
                "interval": {"type": "str"},
                "timeout": {"type": "str"},
                "start_period": {"type": "str"},
                "retries": {"type": "int"},
            },
        },
        "hostname": {"type": "str"},
        "hosts": {"type": "dict"},
        "labels": {"type": "dict"},
        "container_labels": {"type": "dict"},
        "sysctls": {"type": "dict"},
        "mode": {
            "type": "str",
            "default": "replicated",
            "choices": ["replicated", "global", "replicated-job"],
        },
        "replicas": {"type": "int", "default": -1},
        "endpoint_mode": {"type": "str", "choices": ["vip", "dnsrr"]},
        "stop_grace_period": {"type": "str"},
        "stop_signal": {"type": "str"},
        "limits": {
            "type": "dict",
            "options": {
                "cpus": {"type": "float"},
                "memory": {"type": "str"},
            },
        },
        "read_only": {"type": "bool"},
        "reservations": {
            "type": "dict",
            "options": {
                "cpus": {"type": "float"},
                "memory": {"type": "str"},
            },
        },
        "resolve_image": {"type": "bool", "default": False},
        "restart_config": {
            "type": "dict",
            "options": {
                "condition": {"type": "str", "choices": ["none", "on-failure", "any"]},
                "delay": {"type": "str"},
                "max_attempts": {"type": "int"},
                "window": {"type": "str"},
            },
        },
        "rollback_config": {
            "type": "dict",
            "options": {
                "parallelism": {"type": "int"},
                "delay": {"type": "str"},
                "failure_action": {"type": "str", "choices": ["continue", "pause"]},
                "monitor": {"type": "str"},
                "max_failure_ratio": {"type": "float"},
                "order": {"type": "str"},
            },
        },
        "update_config": {
            "type": "dict",
            "options": {
                "parallelism": {"type": "int"},
                "delay": {"type": "str"},
                "failure_action": {
                    "type": "str",
                    "choices": ["continue", "pause", "rollback"],
                },
                "monitor": {"type": "str"},
                "max_failure_ratio": {"type": "float"},
                "order": {"type": "str"},
            },
        },
        "user": {"type": "str"},
        "working_dir": {"type": "str"},
        "init": {"type": "bool"},
        "cap_add": {"type": "list", "elements": "str"},
        "cap_drop": {"type": "list", "elements": "str"},
    }
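    # Each entry maps an option (or a finer-grained "special" usage, listed after
    # the "# specials" marker) to the minimum Docker SDK for Python and Docker API
    # versions that support it; entries with a detect_usage callback are only
    # enforced when the callback reports that the option is actually in use.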
    option_minimal_versions = {
        "dns": {"docker_py_version": "2.6.0"},
        "dns_options": {"docker_py_version": "2.6.0"},
        "dns_search": {"docker_py_version": "2.6.0"},
        "endpoint_mode": {"docker_py_version": "3.0.0"},
        "force_update": {"docker_py_version": "2.1.0"},
        "healthcheck": {"docker_py_version": "2.6.0"},
        "hostname": {"docker_py_version": "2.2.0"},
        "hosts": {"docker_py_version": "2.6.0"},
        "groups": {"docker_py_version": "2.6.0"},
        "tty": {"docker_py_version": "2.4.0"},
        "secrets": {"docker_py_version": "2.4.0"},
        "configs": {"docker_py_version": "2.6.0", "docker_api_version": "1.30"},
        "stop_signal": {"docker_py_version": "2.6.0", "docker_api_version": "1.28"},
        "publish": {"docker_py_version": "3.0.0"},
        "read_only": {"docker_py_version": "2.6.0", "docker_api_version": "1.28"},
        "resolve_image": {"docker_api_version": "1.30", "docker_py_version": "3.2.0"},
        "rollback_config": {"docker_py_version": "3.5.0", "docker_api_version": "1.28"},
        "init": {"docker_py_version": "4.0.0", "docker_api_version": "1.37"},
        "cap_add": {"docker_py_version": "5.0.3", "docker_api_version": "1.41"},
        "cap_drop": {"docker_py_version": "5.0.3", "docker_api_version": "1.41"},
        "sysctls": {"docker_py_version": "6.0.0", "docker_api_version": "1.40"},
        # specials
        "publish_mode": {
            "docker_py_version": "3.0.0",
            "detect_usage": _detect_publish_mode_usage,
            "usage_msg": "set publish.mode",
        },
        "healthcheck_start_period": {
            "docker_py_version": "2.6.0",
            "docker_api_version": "1.29",
            "detect_usage": _detect_healthcheck_start_period,
            "usage_msg": "set healthcheck.start_period",
        },
        "update_config_max_failure_ratio": {
            "docker_py_version": "2.1.0",
            "detect_usage": lambda c: (c.module.params["update_config"] or {}).get(
                "max_failure_ratio"
            )
            is not None,
            "usage_msg": "set update_config.max_failure_ratio",
        },
        "update_config_failure_action": {
            "docker_py_version": "3.5.0",
            "docker_api_version": "1.28",
            "detect_usage": _detect_update_config_failure_action_rollback,
            "usage_msg": "set update_config.failure_action.rollback",
        },
        "update_config_monitor": {
            "docker_py_version": "2.1.0",
            "detect_usage": lambda c: (c.module.params["update_config"] or {}).get(
                "monitor"
            )
            is not None,
            "usage_msg": "set update_config.monitor",
        },
        "update_config_order": {
            "docker_py_version": "2.7.0",
            "docker_api_version": "1.29",
            "detect_usage": lambda c: (c.module.params["update_config"] or {}).get(
                "order"
            )
            is not None,
            "usage_msg": "set update_config.order",
        },
        "placement_config_preferences": {
            "docker_py_version": "2.4.0",
            "docker_api_version": "1.27",
            "detect_usage": lambda c: (c.module.params["placement"] or {}).get(
                "preferences"
            )
            is not None,
            "usage_msg": "set placement.preferences",
        },
        "placement_config_constraints": {
            "docker_py_version": "2.4.0",
            "detect_usage": lambda c: (c.module.params["placement"] or {}).get(
                "constraints"
            )
            is not None,
            "usage_msg": "set placement.constraints",
        },
        "placement_config_replicas_max_per_node": {
            "docker_py_version": "4.4.3",
            "docker_api_version": "1.40",
            "detect_usage": lambda c: (c.module.params["placement"] or {}).get(
                "replicas_max_per_node"
            )
            is not None,
            "usage_msg": "set placement.replicas_max_per_node",
        },
        "mounts_tmpfs": {
            "docker_py_version": "2.6.0",
            "detect_usage": _detect_mount_tmpfs_usage,
            "usage_msg": "set mounts.tmpfs",
        },
        "rollback_config_order": {
            "docker_api_version": "1.29",
            "detect_usage": lambda c: (c.module.params["rollback_config"] or {}).get(
                "order"
            )
            is not None,
            "usage_msg": "set rollback_config.order",
        },
        "mode_replicated_job": {
            "docker_py_version": "6.0.0",
            "docker_api_version": "1.41",
            "detect_usage": lambda c: c.module.params.get("mode") == "replicated-job",
            "usage_msg": "set mode",
        },
    }
    required_if = [("state", "present", ["image"])]

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_version="2.0.2",
        option_minimal_versions=option_minimal_versions,
    )

    try:
        dsm = DockerServiceManager(client)
        msg, changed, rebuilt, changes, facts = dsm.run_safe()

        results = {
            "msg": msg,
            "changed": changed,
            "rebuilt": rebuilt,
            "changes": changes,
            "swarm_service": facts,
        }
        if client.module._diff:
            assert dsm.diff_tracker is not None
            before, after = dsm.diff_tracker.get_before_after()
            results["diff"] = {"before": before, "after": after}

        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()