Mirror of https://github.com/ansible-collections/community.docker.git
synced 2025-12-15 19:42:06 +00:00
* Prevent RCE via inventory plugins.
* Do not make ansible_connection unsafe.
* Add test.
(cherry picked from commit bf1281ae7f)
This commit is contained in:
parent 5cddffa8bb
commit e2e1bc796b
changelogs/fragments/inventory-rce.yml (new file, 2 lines added)
@@ -0,0 +1,2 @@
+security_fixes:
+  - "docker_containers, docker_machine, and docker_swarm inventory plugins - make sure all data received from the Docker daemon / Docker machine is marked as unsafe, so remote code execution by obtaining texts that can be evaluated as templates is not possible (https://www.die-welt.net/2024/03/remote-code-execution-in-ansible-dynamic-inventory-plugins/, https://github.com/ansible-collections/community.docker/pull/815)."
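
The changelog entry summarizes the mechanism used throughout the patch: every value obtained from the Docker daemon or from docker-machine is passed through Ansible's wrap_var (imported as make_unsafe in the hunks below), which marks it so the templating engine will not evaluate it as Jinja2. A minimal sketch of that behaviour, using only the helper the patch itself imports; the sample label value is invented for illustration:

    # Illustration only - not part of the commit.
    from ansible.utils.unsafe_proxy import wrap_var as make_unsafe

    label = 'EVALU{{ "" }}ATED'        # hypothetical value received from the Docker daemon
    safe_label = make_unsafe(label)

    # The content is unchanged, but the returned type carries the "unsafe" marker,
    # so Ansible's templar refuses to evaluate the embedded Jinja2 expression.
    print(safe_label)                  # EVALU{{ "" }}ATED
    print(type(safe_label).__name__)   # AnsibleUnsafeText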
@@ -153,6 +153,7 @@ import re
 from ansible.errors import AnsibleError
 from ansible.module_utils.common.text.converters import to_native
 from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe

 from ansible_collections.community.docker.plugins.module_utils.common import (
     RequestException,
@@ -221,8 +222,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable):

             self.inventory.add_host(name)
             facts = dict(
-                docker_name=name,
-                docker_short_id=short_id
+                docker_name=make_unsafe(name),
+                docker_short_id=make_unsafe(short_id),
             )
             full_facts = dict()

@@ -257,6 +258,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
                     self.inventory.add_group('service_{0}'.format(service_name))
                     self.inventory.add_host(name, group='service_{0}'.format(service_name))

+            ansible_connection = None
             if connection_type == 'ssh':
                 # Figure out ssh IP and Port
                 try:
@@ -277,20 +279,27 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
             elif connection_type == 'docker-cli':
                 facts.update(dict(
                     ansible_host=full_name,
-                    ansible_connection='community.docker.docker',
                 ))
+                ansible_connection = 'community.docker.docker'
             elif connection_type == 'docker-api':
                 facts.update(dict(
                     ansible_host=full_name,
-                    ansible_connection='community.docker.docker_api',
                 ))
                 facts.update(extra_facts)
+                ansible_connection = 'community.docker.docker_api'

             full_facts.update(facts)
             for key, value in inspect.items():
                 fact_key = self._slugify(key)
                 full_facts[fact_key] = value

+            full_facts = make_unsafe(full_facts)
+
+            if ansible_connection:
+                for d in (facts, full_facts):
+                    if 'ansible_connection' not in d:
+                        d['ansible_connection'] = ansible_connection
+
             if verbose_output:
                 facts.update(full_facts)

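
The second bullet of the commit message ("Do not make ansible_connection unsafe") shows up in the hunk above: the connection plugin name is a constant chosen by the inventory plugin, not data returned by the daemon, so it is held in a local variable and merged into the fact dictionaries only after they have been wrapped. A condensed sketch of that ordering; everything except make_unsafe and the connection plugin names is illustrative, not taken from the plugin:

    # Illustration only - not part of the commit.
    from ansible.utils.unsafe_proxy import wrap_var as make_unsafe

    def build_facts(daemon_data, connection_type):
        # Everything received from the daemon is untrusted and gets wrapped.
        facts = make_unsafe(dict(daemon_data))

        # The connection plugin name is a literal picked by the plugin itself,
        # so it is added after wrapping and stays a plain, trusted string.
        if connection_type == 'docker-cli':
            ansible_connection = 'community.docker.docker'
        elif connection_type == 'docker-api':
            ansible_connection = 'community.docker.docker_api'
        else:
            ansible_connection = None

        if ansible_connection and 'ansible_connection' not in facts:
            facts['ansible_connection'] = ansible_connection
        return facts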
@@ -92,6 +92,7 @@ from ansible.module_utils.common.text.converters import to_text
 from ansible.module_utils.common.process import get_bin_path
 from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
 from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe

 import json
 import re
@@ -172,7 +173,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

     def _inspect_docker_machine_host(self, node):
         try:
-            inspect_lines = self._run_command(['inspect', self.node])
+            inspect_lines = self._run_command(['inspect', node])
         except subprocess.CalledProcessError:
             return None

@@ -180,7 +181,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

     def _ip_addr_docker_machine_host(self, node):
         try:
-            ip_addr = self._run_command(['ip', self.node])
+            ip_addr = self._run_command(['ip', node])
         except subprocess.CalledProcessError:
             return None

@@ -201,12 +202,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
     def _populate(self):
         daemon_env = self.get_option('daemon_env')
         try:
-            for self.node in self._get_machine_names():
-                self.node_attrs = self._inspect_docker_machine_host(self.node)
-                if not self.node_attrs:
+            for node in self._get_machine_names():
+                node_attrs = self._inspect_docker_machine_host(node)
+                if not node_attrs:
                     continue

-                machine_name = self.node_attrs['Driver']['MachineName']
+                unsafe_node_attrs = make_unsafe(node_attrs)
+
+                machine_name = unsafe_node_attrs['Driver']['MachineName']

                 # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                 # that could be used to set environment variables to influence a local Docker client:
@@ -223,40 +226,40 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
                 # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                 # this works around an issue seen with Google Compute Platform where the IP address was not available
                 # via the 'inspect' subcommand but was via the 'ip' subcomannd.
-                if self.node_attrs['Driver']['IPAddress']:
-                    ip_addr = self.node_attrs['Driver']['IPAddress']
+                if unsafe_node_attrs['Driver']['IPAddress']:
+                    ip_addr = unsafe_node_attrs['Driver']['IPAddress']
                 else:
-                    ip_addr = self._ip_addr_docker_machine_host(self.node)
+                    ip_addr = self._ip_addr_docker_machine_host(node)

                 # set standard Ansible remote host connection settings to details captured from `docker-machine`
                 # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
-                self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
-                self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
-                self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
-                self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
+                self.inventory.set_variable(machine_name, 'ansible_host', make_unsafe(ip_addr))
+                self.inventory.set_variable(machine_name, 'ansible_port', unsafe_node_attrs['Driver']['SSHPort'])
+                self.inventory.set_variable(machine_name, 'ansible_user', unsafe_node_attrs['Driver']['SSHUser'])
+                self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', unsafe_node_attrs['Driver']['SSHKeyPath'])

                 # set variables based on Docker Machine tags
-                tags = self.node_attrs['Driver'].get('Tags') or ''
-                self.inventory.set_variable(machine_name, 'dm_tags', tags)
+                tags = unsafe_node_attrs['Driver'].get('Tags') or ''
+                self.inventory.set_variable(machine_name, 'dm_tags', make_unsafe(tags))

                 # set variables based on Docker Machine env variables
                 for kv in env_var_tuples:
-                    self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
+                    self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), make_unsafe(kv[1]))

                 if self.get_option('verbose_output'):
-                    self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
+                    self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', unsafe_node_attrs)

                 # Use constructed if applicable
                 strict = self.get_option('strict')

                 # Composed variables
-                self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
+                self._set_composite_vars(self.get_option('compose'), unsafe_node_attrs, machine_name, strict=strict)

                 # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
-                self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
+                self._add_host_to_composed_groups(self.get_option('groups'), unsafe_node_attrs, machine_name, strict=strict)

                 # Create groups based on variable values and add the corresponding hosts to it
-                self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
+                self._add_host_to_keyed_groups(self.get_option('keyed_groups'), unsafe_node_attrs, machine_name, strict=strict)

         except Exception as e:
             raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %

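
In the docker_machine hunks the whole inspect result is wrapped once (unsafe_node_attrs = make_unsafe(node_attrs)) and then reused both for set_variable() and for the constructed compose/groups/keyed_groups features. That works because wrap_var recurses into containers, so nested strings come back marked as well. A small sketch of that, with invented sample data:

    # Illustration only - not part of the commit.
    from ansible.utils.unsafe_proxy import wrap_var as make_unsafe

    node_attrs = {
        'Driver': {
            'MachineName': 'web-1',
            'Tags': '{{ lookup("pipe", "id") }}',   # attacker-controlled text
        },
    }

    unsafe_node_attrs = make_unsafe(node_attrs)

    # Nested values are wrapped too, so a single call covers everything that is
    # later handed to set_variable() and to the constructed-inventory helpers.
    print(type(unsafe_node_attrs['Driver']['Tags']).__name__)   # AnsibleUnsafeText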
@@ -151,6 +151,7 @@ from ansible_collections.community.docker.plugins.module_utils.common import get
 from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname
 from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
 from ansible.parsing.utils.addresses import parse_address
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe

 try:
     import docker
@@ -201,48 +202,49 @@ class InventoryModule(BaseInventoryPlugin, Constructable):

         try:
             self.nodes = self.client.nodes.list()
-            for self.node in self.nodes:
-                self.node_attrs = self.client.nodes.get(self.node.id).attrs
-                self.inventory.add_host(self.node_attrs['ID'])
-                self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
-                self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
-                                            self.node_attrs['Status']['Addr'])
+            for node in self.nodes:
+                node_attrs = self.client.nodes.get(node.id).attrs
+                unsafe_node_attrs = make_unsafe(node_attrs)
+                self.inventory.add_host(unsafe_node_attrs['ID'])
+                self.inventory.add_host(unsafe_node_attrs['ID'], group=unsafe_node_attrs['Spec']['Role'])
+                self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host',
+                                            unsafe_node_attrs['Status']['Addr'])
                 if self.get_option('include_host_uri'):
-                    self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
-                                                'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+                    self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host_uri',
+                                                make_unsafe('tcp://' + unsafe_node_attrs['Status']['Addr'] + ':' + host_uri_port))
                 if self.get_option('verbose_output'):
-                    self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
-                if 'ManagerStatus' in self.node_attrs:
-                    if self.node_attrs['ManagerStatus'].get('Leader'):
+                    self.inventory.set_variable(unsafe_node_attrs['ID'], 'docker_swarm_node_attributes', unsafe_node_attrs)
+                if 'ManagerStatus' in unsafe_node_attrs:
+                    if unsafe_node_attrs['ManagerStatus'].get('Leader'):
                         # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                         # Check moby/moby#35437 for details
-                        swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
-                            self.node_attrs['Status']['Addr']
+                        swarm_leader_ip = parse_address(node_attrs['ManagerStatus']['Addr'])[0] or \
+                            unsafe_node_attrs['Status']['Addr']
                         if self.get_option('include_host_uri'):
-                            self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
-                                                        'tcp://' + swarm_leader_ip + ':' + host_uri_port)
-                        self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
-                        self.inventory.add_host(self.node_attrs['ID'], group='leader')
+                            self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host_uri',
+                                                        make_unsafe('tcp://' + swarm_leader_ip + ':' + host_uri_port))
+                        self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host', make_unsafe(swarm_leader_ip))
+                        self.inventory.add_host(unsafe_node_attrs['ID'], group='leader')
                     else:
-                        self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+                        self.inventory.add_host(unsafe_node_attrs['ID'], group='nonleaders')
                 else:
-                    self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+                    self.inventory.add_host(unsafe_node_attrs['ID'], group='nonleaders')
                 # Use constructed if applicable
                 strict = self.get_option('strict')
                 # Composed variables
                 self._set_composite_vars(self.get_option('compose'),
-                                         self.node_attrs,
-                                         self.node_attrs['ID'],
+                                         unsafe_node_attrs,
+                                         unsafe_node_attrs['ID'],
                                          strict=strict)
                 # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                 self._add_host_to_composed_groups(self.get_option('groups'),
-                                                  self.node_attrs,
-                                                  self.node_attrs['ID'],
+                                                  unsafe_node_attrs,
+                                                  unsafe_node_attrs['ID'],
                                                   strict=strict)
                 # Create groups based on variable values and add the corresponding hosts to it
                 self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
-                                               self.node_attrs,
-                                               self.node_attrs['ID'],
+                                               unsafe_node_attrs,
+                                               unsafe_node_attrs['ID'],
                                                strict=strict)
         except Exception as e:
             raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %

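
One detail worth noting in the docker_swarm hunk: the ansible_host_uri values are re-wrapped as a whole (make_unsafe('tcp://' + ... + ':' + host_uri_port)) rather than relying on the already-unsafe address inside the concatenation. Concatenating a plain str with an unsafe string normally yields an ordinary str again, so the marker would otherwise be dropped; wrapping the finished value keeps it. A short sketch of that pitfall, with invented values and type names reflecting wrap_var's usual behaviour:

    # Illustration only - not part of the commit.
    from ansible.utils.unsafe_proxy import wrap_var as make_unsafe

    addr = make_unsafe('10.0.0.5')

    lost = 'tcp://' + addr + ':2376'               # str + unsafe -> plain str, marker dropped
    kept = make_unsafe('tcp://' + addr + ':2376')  # wrap the finished value instead

    print(type(lost).__name__)    # str
    print(type(kept).__name__)    # AnsibleUnsafeText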
@@ -21,6 +21,8 @@
     command: '/bin/sh -c "sleep 10m"'
     published_ports:
       - 22/tcp
+    labels:
+      foo: !unsafe 'EVALU{{ "" }}ATED'
   loop:
     - name: ansible-docker-test-docker-inventory-container-1
     - name: ansible-docker-test-docker-inventory-container-2

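
The test label is written as EVALU{{ "" }}ATED so the marker word never appears literally in the test data: it can only show up if something actually renders the label through Jinja2 (the !unsafe tag keeps the playbook itself from templating it when the container is created). A quick illustration of what an unwanted evaluation would produce, using plain Jinja2 as a stand-in for Ansible's templar:

    # Illustration only - not part of the commit.
    import jinja2

    label = 'EVALU{{ "" }}ATED'
    print(jinja2.Template(label).render())   # EVALUATED  <- what the run.sh check greps for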
@@ -21,7 +21,6 @@
       # will be other containers.
       - inventory_hostname.startswith('ansible-docker-test-docker-inventory-container-')
   block:
-
     - name: Run raw command
       raw: ls /
       register: output

@@ -43,3 +43,13 @@
         # When the integration tests are run inside a docker container, there
         # will be other containers.
         - inventory_hostname.startswith('ansible-docker-test-docker-inventory-container-')
+    - name: Write labels into file
+      copy:
+        dest: "/tmp/{{ inventory_hostname }}-labels.txt"
+        content: |-
+          {{ docker_config.Labels }}
+      delegate_to: localhost
+      when:
+        # When the integration tests are run inside a docker container, there
+        # will be other containers.
+        - inventory_hostname.startswith('ansible-docker-test-docker-inventory-container-')

@@ -19,4 +19,10 @@ echo "Test docker_containers inventory 1"
 ansible-playbook -i inventory_1.docker.yml playbooks/test_inventory_1.yml "$@"

 echo "Test docker_containers inventory 2"
+rm -f /tmp/ansible-docker-test-docker-inventory-container-*-labels.txt
 ansible-playbook -i inventory_2.docker.yml playbooks/test_inventory_2.yml "$@"
+
+echo "Validate that 'EVALUATED' does not appear in the labels"
+for FILENAME in /tmp/ansible-docker-test-docker-inventory-container-*-labels.txt; do
+    grep -qv EVALUATED "${FILENAME}" || ( echo "${FILENAME} contains EVALUATED!" && exit 1 )
+done