Add docker_container_copy_into module (#545)

* Move copying functionality to module_utils.

* Add docker_container_copy_into module.

* Use new module in other tests.

* Fix copyright and attributes.

* Improve idempotency, improve stat code.

* Document and test when a stopped container works.

* Improve owner/group detection error handling when container is stopped.

* Fix formulation.

Co-authored-by: Brian Scholer <1260690+briantist@users.noreply.github.com>

* Improve file comparison.

* Avoid reading whole file at once.

* Stream when fetching files from daemon.

* Fix comment.

* Use read() instead of read1().

* Stream files when copying into container.

* Linting.

* Add force parameter.

* Simplify library code.

* Linting.

* Add content and content_is_b64 options.

* Make force=false work as for copy module: only copy if the destination does not exist.

* Improve docs.

* content should be no_log.

* Implement diff mode.

* Improve error handling.

* Lint and improve.

* Set owner/group ID to avoid ID lookup (which fails in paused containers).

* Apply suggestions from code review

Co-authored-by: Brian Scholer <1260690+briantist@users.noreply.github.com>

Co-authored-by: Brian Scholer <1260690+briantist@users.noreply.github.com>
This commit is contained in:
Felix Fontein 2023-01-09 11:52:29 +01:00 committed by GitHub
parent 134d32cae6
commit e198e4ab43
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
26 changed files with 3920 additions and 101 deletions

View File

@ -57,6 +57,7 @@ If you use the Ansible package and do not update collections independently, use
* Modules:
* Docker:
- community.docker.docker_container: manage Docker containers
- community.docker.docker_container_copy_into: copy a file into a Docker container
- community.docker.docker_container_exec: run commands in Docker containers
- community.docker.docker_container_info: retrieve information on Docker containers
- community.docker.docker_host_info: retrieve information on the Docker daemon

View File

@ -0,0 +1,2 @@
minor_changes:
- "docker_api connection plugin - when copying files to/from a container, stream the file contents instead of first reading them to memory (https://github.com/ansible-collections/community.docker/pull/545)."

View File

@ -9,6 +9,7 @@ action_groups:
- docker_compose
- docker_config
- docker_container
- docker_container_copy_into
- docker_container_exec
- docker_container_info
- docker_host_info

View File

@ -0,0 +1,41 @@
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import base64
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
from ansible_collections.community.docker.plugins.module_utils._scramble import unscramble
class ActionModule(ActionBase):
    """Action plugin for the docker_container_copy_into module.

    Injects ansible-core's MAX_FILE_SIZE_FOR_DIFF setting into the module
    arguments, runs the module, and unscrambles any scrambled diff data the
    module returned (scrambling protects no_log values in diff output).
    """

    # Set to True when transferring files to the remote
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        # Declare capabilities before delegating to the module.
        self._supports_check_mode = True
        self._supports_async = True
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect
        # Pass the configured diff size limit through to the module.
        self._task.args['_max_file_size_for_diff'] = C.MAX_FILE_SIZE_FOR_DIFF
        result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val))
        if u'diff' in result and result[u'diff'].get(u'scrambled_diff'):
            # Scrambling is not done for security, but to avoid no_log screwing up the diff
            diff = result[u'diff']
            key = base64.b64decode(diff.pop(u'scrambled_diff'))
            for k in (u'before', u'after'):
                if k in diff:
                    diff[k] = unscramble(diff[k], key)
        return result

View File

@ -68,11 +68,8 @@ options:
type: integer
'''
import io
import os
import os.path
import shutil
import tarfile
from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
@ -82,6 +79,13 @@ from ansible.utils.display import Display
from ansible_collections.community.docker.plugins.module_utils.common_api import (
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.copy import (
DockerFileCopyError,
DockerFileNotFound,
fetch_file,
put_file,
)
from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
DockerSocketHandler,
)
@ -89,7 +93,6 @@ from ansible_collections.community.docker.plugins.plugin_utils.common_api import
AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.module_utils._api.constants import DEFAULT_DATA_CHUNK_SIZE
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
MIN_DOCKER_API = None
@ -260,24 +263,12 @@ class Connection(ConnectionBase):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
def _put_archive(self, container, path, data):
# data can also be file object for streaming. This is because _put uses requests's put().
# See https://2.python-requests.org/en/master/user/advanced/#streaming-uploads
# WARNING: might not work with all transports!
url = self.client._url('/containers/{0}/archive', container)
res = self.client._put(url, params={'path': path}, data=data)
self.client._raise_for_status(res)
return res.status_code == 200
def put_file(self, in_path, out_path):
""" Transfer a file from local to docker container """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
out_path = self._prefix_login_path(out_path)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound(
"file or module does not exist: %s" % to_native(in_path))
if self.actual_user not in self.ids:
dummy, ids, dummy = self.exec_command(b'id -u && id -g')
@ -294,43 +285,25 @@ class Connection(ConnectionBase):
.format(e, self.get_option('remote_addr'), ids)
)
b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_dir, out_file = os.path.split(out_path)
# TODO: stream tar file, instead of creating it in-memory into a BytesIO
bio = io.BytesIO()
with tarfile.open(fileobj=bio, mode='w|', dereference=True, encoding='utf-8') as tar:
# Note that without both name (bytes) and arcname (unicode), this either fails for
# Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
# form) it works with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11
tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
user_id, group_id = self.ids[self.actual_user]
tarinfo.uid = user_id
tarinfo.uname = ''
if self.actual_user:
tarinfo.uname = self.actual_user
tarinfo.gid = group_id
tarinfo.gname = ''
tarinfo.mode &= 0o700
with open(b_in_path, 'rb') as f:
tar.addfile(tarinfo, fileobj=f)
data = bio.getvalue()
ok = self._call_client(
lambda: self._put_archive(
self.get_option('remote_addr'),
out_dir,
data,
),
not_found_can_be_resource=True,
)
if not ok:
raise AnsibleConnectionFailure(
'Unknown error while creating file "{0}" in container "{1}".'
.format(out_path, self.get_option('remote_addr'))
user_id, group_id = self.ids[self.actual_user]
try:
self._call_client(
lambda: put_file(
self.client,
container=self.get_option('remote_addr'),
in_path=in_path,
out_path=out_path,
user_id=user_id,
group_id=group_id,
user_name=self.actual_user,
follow_links=True,
),
not_found_can_be_resource=True,
)
except DockerFileNotFound as exc:
raise AnsibleFileNotFound(to_native(exc))
except DockerFileCopyError as exc:
raise AnsibleConnectionFailure(to_native(exc))
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
@ -338,55 +311,23 @@ class Connection(ConnectionBase):
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
in_path = self._prefix_login_path(in_path)
b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
considered_in_paths = set()
while True:
if in_path in considered_in_paths:
raise AnsibleConnectionFailure('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
considered_in_paths.add(in_path)
display.vvvv('FETCH: Fetching "%s"' % in_path, host=self.get_option('remote_addr'))
stream = self._call_client(
lambda: self.client.get_raw_stream(
'/containers/{0}/archive', self.get_option('remote_addr'),
params={'path': in_path},
headers={'Accept-Encoding': 'identity'},
try:
self._call_client(
lambda: fetch_file(
self.client,
container=self.get_option('remote_addr'),
in_path=in_path,
out_path=out_path,
follow_links=True,
log=lambda msg: display.vvvv(msg, host=self.get_option('remote_addr')),
),
not_found_can_be_resource=True,
)
# TODO: stream tar file instead of downloading it into a BytesIO
bio = io.BytesIO()
for chunk in stream:
bio.write(chunk)
bio.seek(0)
with tarfile.open(fileobj=bio, mode='r|') as tar:
symlink_member = None
first = True
for member in tar:
if not first:
raise AnsibleConnectionFailure('Received tarfile contains more than one file!')
first = False
if member.issym():
symlink_member = member
continue
if not member.isfile():
raise AnsibleConnectionFailure('Remote file "%s" is not a regular file or a symbolic link' % in_path)
in_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
with open(b_out_path, 'wb') as out_f:
shutil.copyfileobj(in_f, out_f, member.size)
if first:
raise AnsibleConnectionFailure('Received tarfile is empty!')
# If the only member was a file, it's already extracted. If it is a symlink, process it now.
if symlink_member is not None:
in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
display.vvvv('FETCH: Following symbolic link to "%s"' % in_path, host=self.get_option('remote_addr'))
continue
return
except DockerFileNotFound as exc:
raise AnsibleFileNotFound(to_native(exc))
except DockerFileCopyError as exc:
raise AnsibleConnectionFailure(to_native(exc))
def close(self):
""" Terminate the connection. Nothing to do for Docker"""

View File

@ -227,6 +227,10 @@ class APIClient(
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
    @update_headers
    def _head(self, url, **kwargs):
        # HEAD request with the client's default request timeout applied.
        return self.head(url, **self._set_request_timeout(kwargs))
    @update_headers
    def _put(self, url, **kwargs):
        # PUT request with the client's default request timeout applied.
        return self.put(url, **self._set_request_timeout(kwargs))

View File

@ -0,0 +1,56 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import random
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import PY2
def generate_insecure_key():
    '''Do NOT use this for cryptographic purposes!'''
    # scramble()/unscramble() below only ever use the first byte of the key,
    # so a single random byte is sufficient.
    while True:
        byte_value = random.randint(0, 255)
        if PY2:
            key = chr(byte_value)
        else:
            key = bytes([byte_value])
        # A zero byte would make the XOR scrambling a no-op; reject it.
        if key != b'\x00':
            return key
def scramble(value, key):
    '''Do NOT use this for cryptographic purposes!'''
    if len(key) < 1:
        raise ValueError('Key must be at least one byte')
    data = to_bytes(value)
    # XOR every byte with the first key byte.
    if PY2:
        key_byte = ord(key[0])
        data = b''.join([chr(key_byte ^ ord(ch)) for ch in data])
    else:
        key_byte = key[0]
        data = bytes([key_byte ^ ch for ch in data])
    # Prefix with '=S=' so unscramble() can recognize scrambled values.
    return '=S=' + to_native(base64.b64encode(data))
def unscramble(value, key):
    '''Do NOT use this for cryptographic purposes!'''
    if len(key) < 1:
        raise ValueError('Key must be at least one byte')
    if not value.startswith(u'=S='):
        raise ValueError('Value does not start with indicator')
    # Strip the '=S=' marker, undo the base64 encoding, then the XOR scrambling.
    data = base64.b64decode(value[3:])
    if PY2:
        key_byte = ord(key[0])
        data = b''.join([chr(key_byte ^ ord(ch)) for ch in data])
    else:
        key_byte = key[0]
        data = bytes([key_byte ^ ch for ch in data])
    return to_text(data)

View File

@ -0,0 +1,442 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import datetime
import io
import json
import os
import os.path
import shutil
import stat
import tarfile
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import raise_from
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, NotFound
class DockerFileCopyError(Exception):
    """Base class for all errors raised by the container file copy helpers."""
    pass


class DockerUnexpectedError(DockerFileCopyError):
    """Raised on conditions that should not occur (malformed daemon responses, inconsistent state)."""
    pass


class DockerFileNotFound(DockerFileCopyError):
    """Raised when a file to copy does not exist, either locally or in the container."""
    pass
def _put_archive(client, container, path, data):
    """Upload a tar archive to ``path`` inside ``container``; return True on HTTP 200."""
    # data can also be a file object or generator for streaming. This works because
    # _put uses requests's put().
    # See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
    endpoint = client._url('/containers/{0}/archive', container)
    response = client._put(endpoint, params={'path': path}, data=data)
    client._raise_for_status(response)
    return response.status_code == 200
def _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
    """Build an in-memory tar archive (bytes) containing a single symlink entry.

    :param b_in_path: byte path of the local symlink
    :param file_stat: stat result for the symlink; must satisfy ``stat.S_ISLNK``
    :param out_file: member name of the entry inside the archive
    :param user_id: numeric owner recorded in the tar header
    :param group_id: numeric group recorded in the tar header
    :param mode: optional mode override; defaults to the symlink's mode masked with 0o700
    :param user_name: optional symbolic owner name for the tar header
    """
    if not stat.S_ISLNK(file_stat.st_mode):
        raise DockerUnexpectedError('stat information is not for a symlink')
    bio = io.BytesIO()
    with tarfile.open(fileobj=bio, mode='w|', dereference=False, encoding='utf-8') as tar:
        # Note that without both name (bytes) and arcname (unicode), this either fails for
        # Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
        # form) it works with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11
        tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
        tarinfo.uid = user_id
        tarinfo.uname = ''
        if user_name:
            tarinfo.uname = user_name
        tarinfo.gid = group_id
        tarinfo.gname = ''
        # Strip group/other permission bits unless an explicit mode was given.
        tarinfo.mode &= 0o700
        if mode is not None:
            tarinfo.mode = mode
        # Re-check: the tar entry itself must describe a symlink.
        if not tarinfo.issym():
            raise DockerUnexpectedError('stat information is not for a symlink')
        tar.addfile(tarinfo)
    return bio.getvalue()
def _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
    # Generator wrapper: the whole single-entry symlink archive fits in one chunk.
    archive = _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode, user_name)
    yield archive
def _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
    """Yield the chunks of a tar archive containing the single regular file ``b_in_path``.

    The file's contents are streamed from disk in 64 KiB chunks instead of being
    read into memory at once. The archive entry is named ``out_file`` and carries
    the given owner/group and mode (defaulting to the file's mode masked with 0o700).
    """
    if not stat.S_ISREG(file_stat.st_mode):
        raise DockerUnexpectedError('stat information is not for a regular file')
    tarinfo = tarfile.TarInfo()
    # Normalize to a relative, slash-separated member name (drop drive letter on Windows).
    tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
    tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
    tarinfo.uid = user_id
    tarinfo.gid = group_id
    tarinfo.size = file_stat.st_size
    tarinfo.mtime = file_stat.st_mtime
    tarinfo.type = tarfile.REGTYPE
    tarinfo.linkname = ''
    if user_name:
        tarinfo.uname = user_name
    # First the 512-byte tar header for the entry...
    tarinfo_buf = tarinfo.tobuf()
    total_size = len(tarinfo_buf)
    yield tarinfo_buf
    # ...then exactly tarinfo.size bytes of file content, streamed in chunks.
    size = tarinfo.size
    total_size += size
    with open(b_in_path, 'rb') as f:
        while size > 0:
            to_read = min(size, 65536)
            buf = f.read(to_read)
            if not buf:
                break
            size -= len(buf)
            yield buf
    if size:
        # If for some reason the file shrunk, fill up to the announced size with zeros.
        # (If it enlarged, ignore the remainder.)
        yield tarfile.NUL * size
    remainder = tarinfo.size % tarfile.BLOCKSIZE
    if remainder:
        # We need to write a multiple of 512 bytes. Fill up with zeros.
        yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
        total_size += tarfile.BLOCKSIZE - remainder
    # End with two zeroed blocks
    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
    total_size += 2 * tarfile.BLOCKSIZE
    # Pad the archive to a multiple of the tar record size (10240 bytes).
    remainder = total_size % tarfile.RECORDSIZE
    if remainder > 0:
        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
def _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=None):
    """Yield the chunks of a tar archive containing one regular file named ``out_file``
    whose contents are the given ``content`` bytes, with the given owner/group and mode."""
    tarinfo = tarfile.TarInfo()
    # Normalize to a relative, slash-separated member name (drop drive letter on Windows).
    tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
    tarinfo.mode = mode
    tarinfo.uid = user_id
    tarinfo.gid = group_id
    tarinfo.size = len(content)
    try:
        tarinfo.mtime = int(datetime.datetime.now().timestamp())
    except AttributeError:
        # Python 2 (or more precisely: Python < 3.3) has no timestamp(). Use the following
        # expression for Python 2:
        tarinfo.mtime = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds())
    tarinfo.type = tarfile.REGTYPE
    tarinfo.linkname = ''
    if user_name:
        tarinfo.uname = user_name
    # First the 512-byte tar header, then the content itself.
    tarinfo_buf = tarinfo.tobuf()
    total_size = len(tarinfo_buf)
    yield tarinfo_buf
    total_size += len(content)
    yield content
    remainder = tarinfo.size % tarfile.BLOCKSIZE
    if remainder:
        # We need to write a multiple of 512 bytes. Fill up with zeros.
        yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
        total_size += tarfile.BLOCKSIZE - remainder
    # End with two zeroed blocks
    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
    total_size += 2 * tarfile.BLOCKSIZE
    # Pad the archive to a multiple of the tar record size (10240 bytes).
    remainder = total_size % tarfile.RECORDSIZE
    if remainder > 0:
        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
def put_file(client, container, in_path, out_path, user_id, group_id, mode=None, user_name=None, follow_links=False):
    """Transfer a local regular file or symlink into a Docker container.

    Streams a single-entry tar archive to the daemon's archive endpoint.
    Raises DockerFileNotFound if ``in_path`` does not exist, DockerFileCopyError
    for unsupported file types, and DockerUnexpectedError on upload failure.
    """
    b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
    if not os.path.exists(b_in_path):
        raise DockerFileNotFound(
            "file or module does not exist: %s" % to_native(in_path))

    out_dir, out_file = os.path.split(out_path)

    # follow_links decides whether a symlink source is dereferenced.
    stat_fn = os.stat if follow_links else os.lstat
    file_stat = stat_fn(b_in_path)

    if stat.S_ISREG(file_stat.st_mode):
        stream = _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
    elif stat.S_ISLNK(file_stat.st_mode):
        stream = _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
    else:
        raise DockerFileCopyError(
            'File{0} {1} is neither a regular file nor a symlink (stat mode {2}).'.format(
                ' referenced by' if follow_links else '', in_path, oct(file_stat.st_mode)))

    if not _put_archive(client, container, out_dir, stream):
        raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
def put_file_content(client, container, content, out_path, user_id, group_id, mode, user_name=None):
    """Write the given ``content`` bytes as a file at ``out_path`` inside a Docker container."""
    out_dir, out_file = os.path.split(out_path)
    archive_stream = _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=user_name)
    if not _put_archive(client, container, out_dir, archive_stream):
        raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
def stat_file(client, container, in_path, follow_links=False, log=None):
    """Fetch information on a file from a Docker container to local.

    Return a tuple ``(path, stat_data, link_target)`` where:

    :path: is the resolved path in case ``follow_links=True``;
    :stat_data: is ``None`` if the file does not exist, or a dictionary with fields
        ``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
        ``mtime`` (string), and ``linkTarget`` (string);
    :link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
        and a string with the symlink target otherwise.
    """
    considered_in_paths = set()
    while True:
        # Guard against symlink cycles: abort if we see the same path twice.
        if in_path in considered_in_paths:
            raise DockerFileCopyError('Found infinite symbolic link loop when trying to stat "{0}"'.format(in_path))
        considered_in_paths.add(in_path)
        if log:
            log('FETCH: Stating "%s"' % in_path)
        response = client._head(
            client._url('/containers/{0}/archive', container),
            params={'path': in_path},
        )
        if response.status_code == 404:
            # The daemon answers 404 when the path does not exist in the container.
            return in_path, None, None
        client._raise_for_status(response)
        # The stat data is transported as base64-encoded JSON in a response header.
        header = response.headers.get('x-docker-container-path-stat')
        try:
            stat_data = json.loads(base64.b64decode(header))
        except Exception as exc:
            raise DockerUnexpectedError(
                'When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}'
                .format(in_path=in_path, container=container, header=header, exc=exc)
            )
        # https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink
        if stat_data['mode'] & (1 << (32 - 5)) != 0:
            link_target = stat_data['linkTarget']
            if not follow_links:
                return in_path, stat_data, link_target
            # Resolve the link relative to its directory and stat the target instead.
            in_path = os.path.join(os.path.split(in_path)[0], link_target)
            continue
        return in_path, stat_data, None
class _RawGeneratorFileobj(io.RawIOBase):
def __init__(self, stream):
self._stream = stream
self._buf = b''
def readable(self):
return True
def _readinto_from_buf(self, b, index, length):
cpy = min(length - index, len(self._buf))
if cpy:
b[index:index + cpy] = self._buf[:cpy]
self._buf = self._buf[cpy:]
index += cpy
return index
def readinto(self, b):
index = 0
length = len(b)
index = self._readinto_from_buf(b, index, length)
if index == length:
return index
try:
self._buf += next(self._stream)
except StopIteration:
return index
return self._readinto_from_buf(b, index, length)
def _stream_generator_to_fileobj(stream):
    '''Given a generator that generates chunks of bytes, create a readable buffered stream.'''
    return io.BufferedReader(_RawGeneratorFileobj(stream))
def fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=False, log=None):
    """Fetch a file (as a tar file entry) from a Docker container to local.

    Downloads ``in_path`` from ``container`` via the archive endpoint (a tar
    stream with a single member) and dispatches to one of the callbacks:

    - ``process_none(in_path)`` when the path does not exist;
    - ``process_regular(in_path, tar, member)`` for a regular file;
    - ``process_symlink(in_path, member)`` for a symlink when ``follow_links`` is false;
    - ``process_other(in_path, member)`` for any other member type.

    When ``follow_links`` is true, symlinks are resolved (relative to their
    containing directory) and re-fetched instead of calling ``process_symlink``.
    Returns whatever the invoked callback returns.
    """
    considered_in_paths = set()
    while True:
        # Guard against symlink cycles: abort if we see the same path twice.
        if in_path in considered_in_paths:
            raise DockerFileCopyError('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
        considered_in_paths.add(in_path)
        if log:
            log('FETCH: Fetching "%s"' % in_path)
        try:
            stream = client.get_raw_stream(
                '/containers/{0}/archive', container,
                params={'path': in_path},
                headers={'Accept-Encoding': 'identity'},
            )
        except NotFound:
            return process_none(in_path)
        # Stream the tar archive instead of downloading it into memory first.
        with tarfile.open(fileobj=_stream_generator_to_fileobj(stream), mode='r|') as tar:
            symlink_member = None
            result = None
            found = False
            for member in tar:
                if found:
                    raise DockerUnexpectedError('Received tarfile contains more than one file!')
                found = True
                if member.issym():
                    # Defer symlink handling until after the loop.
                    symlink_member = member
                    continue
                if member.isfile():
                    result = process_regular(in_path, tar, member)
                    continue
                result = process_other(in_path, member)
            if symlink_member:
                if not follow_links:
                    return process_symlink(in_path, symlink_member)
                # Resolve the link target relative to the link's directory and retry.
                in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
                if log:
                    log('FETCH: Following symbolic link to "%s"' % in_path)
                continue
            if found:
                return result
            raise DockerUnexpectedError('Received tarfile is empty!')
def fetch_file(client, container, in_path, out_path, follow_links=False, log=None):
    """Fetch ``in_path`` from ``container`` and write it to the local path ``out_path``.

    When ``follow_links`` is true, symlinks inside the container are resolved
    and the regular file content is written locally; otherwise a matching local
    symlink is created. Returns the (possibly resolved) in-container path.
    Raises DockerFileNotFound if the file does not exist and DockerFileCopyError
    if it is neither a regular file nor a symlink.
    """
    b_out_path = to_bytes(out_path, errors='surrogate_or_strict')

    def process_none(in_path):
        # Called when the archive endpoint reports the path as missing.
        raise DockerFileNotFound(
            'File {in_path} does not exist in container {container}'
            .format(in_path=in_path, container=container)
        )

    def process_regular(in_path, tar, member):
        # NOTE(review): the destination is unlinked first only when follow_links
        # is false — presumably to replace an existing local symlink instead of
        # writing through it; confirm against callers.
        if not follow_links and os.path.exists(b_out_path):
            os.unlink(b_out_path)
        in_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
        with open(b_out_path, 'wb') as out_f:
            shutil.copyfileobj(in_f, out_f)
        return in_path

    def process_symlink(in_path, member):
        # Replace any existing destination with a symlink to the same target.
        if os.path.exists(b_out_path):
            os.unlink(b_out_path)
        os.symlink(member.linkname, b_out_path)
        return in_path

    def process_other(in_path, member):
        raise DockerFileCopyError('Remote file "%s" is not a regular file or a symbolic link' % in_path)

    return fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=follow_links, log=log)
def _execute_command(client, container, command, log=None, check_rc=False):
    """Run ``command`` (an argv list) inside ``container`` via the Docker exec API.

    Returns ``(rc, stdout, stderr)`` with stdout/stderr as bytes. Raises
    DockerFileCopyError if the container is missing or paused, and
    DockerUnexpectedError when ``check_rc`` is true and the exit code is non-zero.
    """
    if log:
        log('Executing {command} in {container}'.format(command=command, container=container))
    # Create the exec instance; only stdout/stderr are attached.
    data = {
        'Container': container,
        'User': '',
        'Privileged': False,
        'Tty': False,
        'AttachStdin': False,
        'AttachStdout': True,
        'AttachStderr': True,
        'Cmd': command,
    }
    if 'detachKeys' in client._general_configs:
        data['detachKeys'] = client._general_configs['detachKeys']
    try:
        exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
    except NotFound as e:
        raise_from(
            DockerFileCopyError('Could not find container "{container}"'.format(container=container)),
            e,
        )
    except APIError as e:
        # The daemon answers 409 when exec is attempted in a paused container.
        if e.response is not None and e.response.status_code == 409:
            raise_from(
                DockerFileCopyError('Cannot execute command in paused container "{container}"'.format(container=container)),
                e,
            )
        raise
    exec_id = exec_data['Id']
    # Start the exec instance and collect demultiplexed stdout/stderr.
    data = {
        'Tty': False,
        'Detach': False
    }
    stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, stream=False, demux=True, tty=False)
    # Query the exit code after the command finished.
    result = client.get_json('/exec/{0}/json', exec_id)
    rc = result.get('ExitCode') or 0
    stdout = stdout or b''
    stderr = stderr or b''
    if log:
        log('Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}'.format(rc=rc, stdout=stdout, stderr=stderr))
    if check_rc and rc != 0:
        raise DockerUnexpectedError(
            'Obtained unexpected exit code {rc} when running "{command}" in {container}.\nSTDOUT: {stdout}\nSTDERR: {stderr}'
            .format(command=' '.join(command), container=container, rc=rc, stdout=stdout, stderr=stderr)
        )
    return rc, stdout, stderr
def determine_user_group(client, container, log=None):
    """Return the ``(uid, gid)`` of the default user inside ``container``.

    Runs ``id -u && id -g`` through ``/bin/sh`` in the container and parses
    the two output lines as integers.
    """
    dummy, stdout, dummy_stderr = _execute_command(client, container, ['/bin/sh', '-c', 'id -u && id -g'], check_rc=True, log=log)

    lines = stdout.splitlines()
    if len(lines) != 2:
        raise DockerUnexpectedError(
            'Expected two-line output to obtain user and group ID for container {container}, but got {lc} lines:\n{stdout}'
            .format(container=container, lc=len(lines), stdout=stdout)
        )
    raw_uid, raw_gid = lines
    try:
        return int(raw_uid), int(raw_gid)
    except ValueError:
        raise DockerUnexpectedError(
            'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{l1}" and "{l2}" instead'
            .format(container=container, l1=raw_uid, l2=raw_gid)
        )

View File

@ -0,0 +1,869 @@
#!/usr/bin/python
#
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: docker_container_copy_into
short_description: Copy a file into a Docker container
description:
- Copy a file into a Docker container.
- Similar to C(docker cp).
- To copy files in a non-running container, you must provide the I(owner_id) and I(group_id) options.
This is also necessary if the container does not contain a C(/bin/sh) shell with an C(id) tool.
attributes:
check_mode:
support: full
diff_mode:
support: full
details:
- Additional data will need to be transferred to compute diffs.
- The module uses R(the MAX_FILE_SIZE_FOR_DIFF ansible-core configuration,MAX_FILE_SIZE_FOR_DIFF)
to determine for how large files diffs should be computed.
options:
container:
description:
- The name of the container to copy files to.
type: str
required: true
path:
description:
- Path to a file on the managed node.
- Mutually exclusive with I(content). One of I(content) and I(path) is required.
type: path
content:
description:
- The file's content.
- If you plan to provide binary data, provide it pre-encoded to base64, and set I(content_is_b64=true).
- Mutually exclusive with I(path). One of I(content) and I(path) is required.
type: str
content_is_b64:
description:
- If set to C(true), the content in I(content) is assumed to be Base64 encoded and
will be decoded before being used.
- To use binary I(content), it is better to keep it Base64 encoded and let it
be decoded by this option. Otherwise you risk the data to be interpreted as
UTF-8 and corrupted.
type: bool
default: false
container_path:
description:
- Path to a file inside the Docker container.
- Must be an absolute path.
type: str
required: true
follow:
description:
- This flag indicates that filesystem links in the Docker container, if they exist, should be followed.
type: bool
default: false
local_follow:
description:
- This flag indicates that filesystem links in the source tree (where the module is executed), if they exist, should be followed.
type: bool
default: true
owner_id:
description:
- The owner ID to use when writing the file to disk.
- If provided, I(group_id) must also be provided.
- If not provided, the module will try to determine the user and group ID for the current user in the container.
This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available.
Also the container must be running.
type: int
group_id:
description:
- The group ID to use when writing the file to disk.
- If provided, I(owner_id) must also be provided.
- If not provided, the module will try to determine the user and group ID for the current user in the container.
This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available.
Also the container must be running.
type: int
mode:
description:
- The file mode to use when writing the file to disk.
- Will use the file's mode from the source system if this option is not provided.
type: int
force:
description:
- If set to C(true), force writing the file (without performing any idempotency checks).
- If set to C(false), only write the file if it does not exist on the target. If a filesystem object exists at
the destination, the module will not do any change.
- If this option is not specified, the module will be idempotent. To verify idempotency, it will try to get information
on the filesystem object in the container, and if everything seems to match will download the file from the container
to compare it to the file to upload.
type: bool
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
author:
- "Felix Fontein (@felixfontein)"
requirements:
- "Docker API >= 1.25"
'''
# The second example must use the module's actual option names: the module
# declares owner_id/group_id (not owner/group), so the previous example would
# fail with "unsupported parameters".
EXAMPLES = '''
- name: Copy a file into the container
  community.docker.docker_container_copy_into:
    container: mydata
    path: /home/user/data.txt
    container_path: /data/input.txt

- name: Copy a file into the container with owner, group, and mode set
  community.docker.docker_container_copy_into:
    container: mydata
    path: /home/user/bin/runme.o
    container_path: /bin/runme
    owner_id: 0  # root
    group_id: 0  # root
    mode: 0o755  # readable and executable by all users, writable by root
'''
RETURN = '''
container_path:
description:
- The actual path in the container.
- Can only be different from I(container_path) when I(follow=true).
type: str
returned: success
'''
import base64
import io
import os
import random
import stat
import traceback
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.copy import (
DockerFileCopyError,
DockerFileNotFound,
DockerUnexpectedError,
determine_user_group,
fetch_file_ex,
put_file,
put_file_content,
stat_file,
)
from ansible_collections.community.docker.plugins.module_utils._scramble import generate_insecure_key, scramble
def are_fileobjs_equal(f1, f2):
    '''Given two (buffered) file objects, compare their contents.

    Reads both streams incrementally; short reads are tolerated by buffering
    the pending bytes of each stream and comparing the common prefix.
    '''
    chunk_size = 65536
    pending1 = b''
    pending2 = b''
    while True:
        # Top up each buffer from its stream; a falsy read means EOF, after
        # which the stream is dropped (set to None) and never read again.
        if f1 and len(pending1) < chunk_size:
            data = f1.read(chunk_size)
            if not data:
                f1 = None
            pending1 += data
        if f2 and len(pending2) < chunk_size:
            data = f2.read(chunk_size)
            if not data:
                f2 = None
            pending2 += data
        if not pending1 or not pending2:
            # At least one stream is fully consumed. The files are equal
            # exactly when both buffers are empty at this point.
            return not pending1 and not pending2
        # Compare the common prefix of both buffers, then discard it.
        common = min(len(pending1), len(pending2))
        if pending1[:common] != pending2[:common]:
            return False
        pending1 = pending1[common:]
        pending2 = pending2[common:]
def are_fileobjs_equal_read_first(f1, f2):
    '''Compare two (buffered) file objects chunk-wise.

    Returns a tuple ``(is_equal, content_of_f1)``: whether both files have
    the same content, and the complete content of the first file object.
    The first file is always read to the end, even if a difference is
    detected early on.
    '''
    chunk_size = 65536
    pending1 = b''
    pending2 = b''
    matches = True
    collected = []
    while True:
        # Top up each buffer from its file while data remains.
        if f1 is not None and len(pending1) < chunk_size:
            data = f1.read(chunk_size)
            if not data:
                # f1 reached EOF; stop reading from it
                f1 = None
            pending1 += data
        if f2 is not None and len(pending2) < chunk_size:
            data = f2.read(chunk_size)
            if not data:
                # f2 reached EOF; stop reading from it
                f2 = None
            pending2 += data
        if not pending1 or not pending2:
            # At least one file is exhausted and all its data was compared.
            # Equal exactly when both sides are exhausted.
            matches = not pending1 and not pending2
            break
        # Compare the common prefix of both buffers, keep f1's part, drop it.
        common = min(len(pending1), len(pending2))
        if pending1[:common] != pending2[:common]:
            matches = False
            break
        collected.append(pending1[:common])
        pending1 = pending1[common:]
        pending2 = pending2[common:]
    # Keep whatever is still buffered from f1, plus the remainder of the file.
    collected.append(pending1)
    if f1:
        collected.append(f1.read())
    return matches, b''.join(collected)
def is_container_file_not_regular_file(container_stat):
    '''Check whether a container stat result describes something other than a regular file.

    The ``mode`` field follows Go's ``io/fs.FileMode`` layout, where
    file-type flags occupy the uppermost bits of a 32-bit value
    (https://pkg.go.dev/io/fs#FileMode).
    '''
    non_regular_flag_bits = (
        32 - 1,   # ModeDir
        32 - 4,   # ModeTemporary
        32 - 5,   # ModeSymlink
        32 - 6,   # ModeDevice
        32 - 7,   # ModeNamedPipe
        32 - 8,   # ModeSocket
        32 - 11,  # ModeCharDevice
        32 - 13,  # ModeIrregular
    )
    mode = container_stat['mode']
    return any(mode & (1 << bit) for bit in non_regular_flag_bits)
def get_container_file_mode(container_stat):
    '''Translate a Go ``io/fs.FileMode`` value into a POSIX permission mode.

    The low bits carry the permission bits directly; setuid, setgid and
    sticky are stored as separate high-bit flags in Go's encoding and are
    mapped back onto the corresponding ``stat`` constants.
    '''
    go_mode = container_stat['mode']
    posix_mode = go_mode & 0xFFF
    # Map Go's special-permission flags back to the POSIX bit positions.
    for go_bit, posix_bit in (
        (32 - 9, stat.S_ISUID),   # ModeSetuid -> set UID bit
        (32 - 10, stat.S_ISGID),  # ModeSetgid -> set GID bit
        (32 - 12, stat.S_ISVTX),  # ModeSticky -> sticky bit
    ):
        if go_mode & (1 << go_bit):
            posix_mode |= posix_bit
    return posix_mode
def add_other_diff(diff, in_path, member):
    '''Record a placeholder "before" entry in ``diff`` for a non-regular tar member.

    ``member`` is a ``tarfile.TarInfo``-like object. Non-regular filesystem
    objects have no content to show, so a textual description (or the link
    target for symlinks/hardlinks) is stored instead.
    '''
    if diff is None:
        # Diff collection is disabled; nothing to record.
        return
    diff['before_header'] = in_path
    # Checked in the same order as before; ``None`` means "use the link target".
    for matches, description in (
        (member.isdir, '(directory)'),
        (lambda: member.issym() or member.islnk(), None),
        (member.ischr, '(character device)'),
        (member.isblk, '(block device)'),
        (member.isfifo, '(fifo)'),
        (member.isdev, '(device)'),
    ):
        if matches():
            diff['before'] = member.linkname if description is None else description
            return
    if member.isfile():
        # Regular files must have been handled by the caller already.
        raise DockerUnexpectedError('should not be a regular file')
    diff['before'] = '(unknown filesystem object)'
def retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat=None, link_target=None):
    '''Populate the "before" side of ``diff`` with the container file's current state.

    If ``regular_stat`` is given, non-regular filesystem objects and
    oversized files are handled from the stat data alone; otherwise (or for
    regular files) the content is fetched from the container.
    '''
    if diff is None:
        return
    if regular_stat is not None:
        mode = regular_stat['mode']
        # First handle all filesystem object types that are not regular files.
        # Go io/fs.FileMode type flags, checked in the same order as before;
        # ``None`` means "record the symlink target instead of a description".
        for bit, description in (
            (32 - 1, '(directory)'),
            (32 - 4, '(temporary file)'),
            (32 - 5, None),
            (32 - 6, '(device)'),
            (32 - 7, '(named pipe)'),
            (32 - 8, '(socket)'),
            (32 - 11, '(character device)'),
            (32 - 13, '(unknown filesystem object)'),
        ):
            if mode & (1 << bit):
                diff['before_header'] = container_path
                diff['before'] = link_target if description is None else description
                return
        # Check whether the file is too large to include in the diff
        if regular_stat['size'] > max_file_size_for_diff > 0:
            diff['dst_larger'] = max_file_size_for_diff
            return

    # We need to get hold of the content: fetch it from the container.
    def process_none(in_path):
        diff['before'] = ''

    def process_regular(in_path, tar, member):
        add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)

    def process_symlink(in_path, member):
        diff['before_header'] = in_path
        diff['before'] = member.linkname

    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)

    fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
def is_binary(content):
    '''Heuristically decide whether ``content`` (bytes) is binary data.

    A NUL byte is treated as the binary marker.
    TODO: better detection
    (ansible-core also just checks for 0x00, and even just sticks to the
    first 8k, so this isn't too bad...)
    '''
    return b'\x00' in content
def are_fileobjs_equal_with_diff_of_first(f1, f2, size, diff, max_file_size_for_diff, container_path):
    '''Compare two file objects and record ``f1``'s content on the diff's "before" side.

    When diff collection is disabled, or the file exceeds the diff size
    limit, this degenerates to a plain streaming comparison without
    content capture.
    '''
    if diff is None:
        return are_fileobjs_equal(f1, f2)
    if size > max_file_size_for_diff > 0:
        # Too large for the diff; note the limit and compare without capturing.
        diff['dst_larger'] = max_file_size_for_diff
        return are_fileobjs_equal(f1, f2)
    equal, first_content = are_fileobjs_equal_read_first(f1, f2)
    if is_binary(first_content):
        diff['dst_binary'] = 1
    else:
        diff['before_header'] = container_path
        diff['before'] = to_text(first_content)
    return equal
def add_diff_dst_from_regular_member(diff, max_file_size_for_diff, container_path, tar, member):
    '''Record a regular tar member's content as the "before" side of ``diff``.'''
    if diff is None:
        # Diff collection is disabled; nothing to record.
        return
    if member.size > max_file_size_for_diff > 0:
        # Too large to include in the diff; just note the limit.
        diff['dst_larger'] = max_file_size_for_diff
        return
    reader = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
    data = reader.read()
    if is_binary(data):
        diff['dst_binary'] = 1
    else:
        diff['before_header'] = container_path
        diff['before'] = to_text(data)
def copy_dst_to_src(diff):
    '''Mirror the destination ("before"/dst) side of ``diff`` onto the source ("after"/src) side.

    For each pair, the source entry is overwritten from the destination
    entry when the latter exists; otherwise a stale source entry is removed.
    '''
    if diff is None:
        return
    key_pairs = (
        ('dst_size', 'src_size'),
        ('dst_binary', 'src_binary'),
        ('before_header', 'after_header'),
        ('before', 'after'),
    )
    for dst_key, src_key in key_pairs:
        if dst_key in diff:
            diff[src_key] = diff[dst_key]
        elif src_key in diff:
            del diff[src_key]
def is_file_idempotent(client, container, managed_path, container_path, follow_links, local_follow_links, owner_id, group_id, mode,
                       force=False, diff=None, max_file_size_for_diff=1):
    '''Check whether copying local file ``managed_path`` to ``container_path`` would change anything.

    Returns a tuple ``(container_path, mode, idempotent)``: the resolved
    container path (symlinks followed if requested), the effective file
    mode, and whether the container already matches the local file. When
    ``diff`` is a dict, it is filled with before/after information.

    Raises DockerFileNotFound if the local file does not exist, and
    DockerFileCopyError if it is neither a regular file nor a symlink.
    '''
    # Retrieve information of local file
    try:
        file_stat = os.stat(managed_path) if local_follow_links else os.lstat(managed_path)
    except OSError as exc:
        if exc.errno == 2:  # ENOENT
            raise DockerFileNotFound('Cannot find local file {managed_path}'.format(managed_path=managed_path))
        raise
    if mode is None:
        # Without an explicit mode, reuse the local file's permission bits.
        mode = stat.S_IMODE(file_stat.st_mode)
    if not stat.S_ISLNK(file_stat.st_mode) and not stat.S_ISREG(file_stat.st_mode):
        # BUGFIX: the message previously lacked .format(), so the literal
        # placeholder '{managed_path}' was shown to the user.
        raise DockerFileCopyError('Local path {managed_path} is not a symbolic link or file'.format(managed_path=managed_path))

    # Record the local ("after") side of the diff.
    if diff is not None:
        if file_stat.st_size > max_file_size_for_diff > 0:
            diff['src_larger'] = max_file_size_for_diff
        elif stat.S_ISLNK(file_stat.st_mode):
            diff['after_header'] = managed_path
            diff['after'] = os.readlink(managed_path)
        else:
            with open(managed_path, 'rb') as f:
                content = f.read()
            if is_binary(content):
                diff['src_binary'] = 1
            else:
                diff['after_header'] = managed_path
                diff['after'] = to_text(content)

    # When forcing and we're not following links in the container, go!
    if force and not follow_links:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
        return container_path, mode, False

    # Resolve symlinks in the container (if requested), and get information on container's file
    real_container_path, regular_stat, link_target = stat_file(
        client,
        container,
        in_path=container_path,
        follow_links=follow_links,
    )

    # Follow links in the Docker container?
    if follow_links:
        container_path = real_container_path

    # If the file wasn't found, continue
    if regular_stat is None:
        if diff is not None:
            diff['before_header'] = container_path
            diff['before'] = ''
        return container_path, mode, False

    # When forcing, go!
    if force:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False

    # If force is set to False, and the destination exists, assume there's nothing to do
    if force is False:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        copy_dst_to_src(diff)
        return container_path, mode, True

    # Basic idempotency checks
    if stat.S_ISLNK(file_stat.st_mode):
        # Local file is a symlink: the container must contain a symlink with
        # the same target for the operation to be idempotent.
        if link_target is None:
            retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
            return container_path, mode, False
        local_link_target = os.readlink(managed_path)
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, local_link_target == link_target
    if link_target is not None:
        # Local file is regular, but the container has a symlink.
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if is_container_file_not_regular_file(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if file_stat.st_size != regular_stat['size']:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if mode != get_container_file_mode(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False

    # Fetch file from container and compare content/ownership in detail.
    def process_none(in_path):
        return container_path, mode, False

    def process_regular(in_path, tar, member):
        # Check things like user/group ID and mode
        if any([
            member.mode & 0xFFF != mode,
            member.uid != owner_id,
            member.gid != group_id,
            not stat.S_ISREG(file_stat.st_mode),
            member.size != file_stat.st_size,
        ]):
            add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
            return container_path, mode, False

        tar_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
        with open(managed_path, 'rb') as local_f:
            is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, local_f, member.size, diff, max_file_size_for_diff, in_path)
        return container_path, mode, is_equal

    def process_symlink(in_path, member):
        if diff is not None:
            diff['before_header'] = in_path
            diff['before'] = member.linkname

        # Check things like user/group ID and mode
        if member.mode & 0xFFF != mode:
            return container_path, mode, False
        if member.uid != owner_id:
            return container_path, mode, False
        if member.gid != group_id:
            return container_path, mode, False

        if not stat.S_ISLNK(file_stat.st_mode):
            return container_path, mode, False

        local_link_target = os.readlink(managed_path)
        return container_path, mode, member.linkname == local_link_target

    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)
        return container_path, mode, False

    return fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
def copy_file_into_container(client, container, managed_path, container_path, follow_links, local_follow_links,
                             owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
    '''Ensure the local file ``managed_path`` is present at ``container_path`` in the container.

    Runs the idempotency check first, copies the file only when needed (and
    not in check mode), and then exits the module with the result.
    '''
    diff_data = {} if diff else None
    container_path, mode, idempotent = is_file_idempotent(
        client,
        container,
        managed_path,
        container_path,
        follow_links,
        local_follow_links,
        owner_id,
        group_id,
        mode,
        force=force,
        diff=diff_data,
        max_file_size_for_diff=max_file_size_for_diff,
    )
    changed = not idempotent

    if changed and not client.module.check_mode:
        # Actually transfer the file into the container.
        put_file(
            client,
            container,
            in_path=managed_path,
            out_path=container_path,
            user_id=owner_id,
            group_id=group_id,
            mode=mode,
            follow_links=local_follow_links,
        )

    result = dict(
        container_path=container_path,
        changed=changed,
    )
    if diff_data:
        result['diff'] = diff_data
    client.module.exit_json(**result)
def is_content_idempotent(client, container, content, container_path, follow_links, owner_id, group_id, mode,
                          force=False, diff=None, max_file_size_for_diff=1):
    '''Check whether writing ``content`` (bytes) to ``container_path`` would change anything.

    Returns a tuple ``(container_path, mode, idempotent)``: the resolved
    container path (symlinks followed if requested), the file mode, and
    whether the container already contains exactly this content. When
    ``diff`` is a dict, it is filled with before/after information.
    '''
    # Record the generated-content ("after") side of the diff.
    if diff is not None:
        if len(content) > max_file_size_for_diff > 0:
            diff['src_larger'] = max_file_size_for_diff
        elif is_binary(content):
            diff['src_binary'] = 1
        else:
            diff['after_header'] = 'dynamically generated'
            diff['after'] = to_text(content)

    # When forcing and we're not following links in the container, go!
    if force and not follow_links:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
        return container_path, mode, False

    # Resolve symlinks in the container (if requested), and get information on container's file
    real_container_path, regular_stat, link_target = stat_file(
        client,
        container,
        in_path=container_path,
        follow_links=follow_links,
    )

    # Follow links in the Docker container?
    if follow_links:
        container_path = real_container_path

    # If the file wasn't found, continue
    if regular_stat is None:
        if diff is not None:
            diff['before_header'] = container_path
            diff['before'] = ''
        return container_path, mode, False

    # When forcing, go!
    if force:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False

    # If force is set to False, and the destination exists, assume there's nothing to do
    if force is False:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        copy_dst_to_src(diff)
        return container_path, mode, True

    # Basic idempotency checks
    if link_target is not None:
        # Destination is a symlink, but we would write plain content.
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if is_container_file_not_regular_file(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if len(content) != regular_stat['size']:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if mode != get_container_file_mode(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False

    # Fetch file from container and compare content/ownership in detail.
    def process_none(in_path):
        if diff is not None:
            diff['before'] = ''
        return container_path, mode, False

    def process_regular(in_path, tar, member):
        # Check things like user/group ID and mode
        if any([
            member.mode & 0xFFF != mode,
            member.uid != owner_id,
            member.gid != group_id,
            member.size != len(content),
        ]):
            add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
            return container_path, mode, False

        tar_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
        is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, io.BytesIO(content), member.size, diff, max_file_size_for_diff, in_path)
        return container_path, mode, is_equal

    def process_symlink(in_path, member):
        if diff is not None:
            diff['before_header'] = in_path
            diff['before'] = member.linkname

        # A symlink in the container never matches generated plain content.
        return container_path, mode, False

    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)

        return container_path, mode, False

    return fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
def copy_content_into_container(client, container, content, container_path, follow_links,
                                owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
    '''Ensure ``content`` (bytes) is stored at ``container_path`` in the container.

    Runs the idempotency check first, writes the content only when needed
    (and not in check mode), scrambles diff output since the content is
    no_log, and then exits the module with the result.
    '''
    diff_data = {} if diff else None
    container_path, mode, idempotent = is_content_idempotent(
        client,
        container,
        content,
        container_path,
        follow_links,
        owner_id,
        group_id,
        mode,
        force=force,
        diff=diff_data,
        max_file_size_for_diff=max_file_size_for_diff,
    )
    changed = not idempotent

    if changed and not client.module.check_mode:
        # Actually write the content into the container.
        put_file_content(
            client,
            container,
            content=content,
            out_path=container_path,
            user_id=owner_id,
            group_id=group_id,
            mode=mode,
        )

    result = dict(
        container_path=container_path,
        changed=changed,
    )
    if diff_data:
        # Since the content is no_log, make sure that the before/after strings look sufficiently different
        key = generate_insecure_key()
        diff_data['scrambled_diff'] = base64.b64encode(key)
        for side in ('before', 'after'):
            if side in diff_data:
                diff_data[side] = scramble(diff_data[side], key)
        result['diff'] = diff_data
    client.module.exit_json(**result)
def main():
    '''Module entry point: parse parameters, normalize them, and dispatch to the copy implementation.'''
    argument_spec = dict(
        container=dict(type='str', required=True),
        path=dict(type='path'),
        container_path=dict(type='str', required=True),
        follow=dict(type='bool', default=False),
        local_follow=dict(type='bool', default=True),
        owner_id=dict(type='int'),
        group_id=dict(type='int'),
        mode=dict(type='int'),
        force=dict(type='bool'),
        content=dict(type='str', no_log=True),
        content_is_b64=dict(type='bool', default=False),

        # Undocumented parameters for use by the action plugin
        _max_file_size_for_diff=dict(type='int'),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        min_docker_api_version='1.20',
        supports_check_mode=True,
        mutually_exclusive=[('path', 'content')],
        required_together=[('owner_id', 'group_id')],
        required_by={
            'content': ['mode'],
        },
    )

    container = client.module.params['container']
    managed_path = client.module.params['path']
    container_path = client.module.params['container_path']
    follow = client.module.params['follow']
    local_follow = client.module.params['local_follow']
    owner_id = client.module.params['owner_id']
    group_id = client.module.params['group_id']
    mode = client.module.params['mode']
    force = client.module.params['force']
    content = client.module.params['content']
    # Falsy/missing limit collapses to 1 byte (diff content effectively disabled
    # unless the action plugin passes a real limit).
    max_file_size_for_diff = client.module.params['_max_file_size_for_diff'] or 1

    if content is not None:
        if client.module.params['content_is_b64']:
            try:
                content = base64.b64decode(content)
            except Exception as e:  # depending on Python version and error, multiple different exceptions can be raised
                client.fail('Cannot Base64 decode the content option: {0}'.format(e))
        else:
            content = to_bytes(content)

    # Normalize the container path to an absolute, canonical form.
    if not container_path.startswith(os.path.sep):
        container_path = os.path.join(os.path.sep, container_path)
    container_path = os.path.normpath(container_path)

    try:
        if owner_id is None or group_id is None:
            # Determine the container's default user/group IDs when not given.
            owner_id, group_id = determine_user_group(client, container)

        if content is not None:
            copy_content_into_container(
                client,
                container,
                content,
                container_path,
                follow_links=follow,
                owner_id=owner_id,
                group_id=group_id,
                mode=mode,
                force=force,
                diff=client.module._diff,
                max_file_size_for_diff=max_file_size_for_diff,
            )
        elif managed_path is not None:
            copy_file_into_container(
                client,
                container,
                managed_path,
                container_path,
                follow_links=follow,
                local_follow_links=local_follow,
                owner_id=owner_id,
                group_id=group_id,
                mode=mode,
                force=force,
                diff=client.module._diff,
                max_file_size_for_diff=max_file_size_for_diff,
            )
        else:
            # Can happen if a user explicitly passes `content: null` or `path: null`...
            client.fail('One of path and content must be supplied')
    except NotFound as exc:
        client.fail('Could not find container "{1}" or resource in it ({0})'.format(exc, container))
    except APIError as exc:
        client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
    except DockerException as exc:
        client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
    except RequestException as exc:
        client.fail(
            'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'.format(exc, container),
            exception=traceback.format_exc())
    except DockerUnexpectedError as exc:
        client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())
    except DockerFileCopyError as exc:
        client.fail(to_native(exc))
    except OSError as exc:
        client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,6 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
azp/4
destructive

View File

@ -0,0 +1,8 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
dependencies:
- setup_docker
- setup_remote_tmp_dir

View File

@ -0,0 +1,45 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
####################################################################
# WARNING: These are designed specifically for Ansible tests #
# and should not be used as examples of how to write Ansible roles #
####################################################################
- name: Gather facts on controller
setup:
gather_subset: '!all'
delegate_to: localhost
delegate_facts: true
run_once: true
# Create random name prefix (for containers)
- name: Create random container name prefix
set_fact:
cname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
cnames: []
- debug:
msg: "Using container name prefix {{ cname_prefix }}"
# Run the tests
- block:
- include_tasks: run-test.yml
with_fileglob:
- "tests/*.yml"
always:
- name: "Make sure all containers are removed"
docker_container:
name: "{{ item }}"
state: absent
force_kill: true
with_items: "{{ cnames }}"
diff: false
when: docker_api_version is version('1.25', '>=')
- fail: msg="Too old Docker API version to run all docker_container_copy_into tests!"
when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)

View File

@ -0,0 +1,7 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- name: "Loading tasks from {{ item }}"
include_tasks: "{{ item }}"

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -43,7 +43,12 @@
- nginx.conf
- name: Copy static files into volume
command: docker cp {{ remote_tmp_dir }}/{{ item }} {{ daemon_nginx_frontend }}:/etc/nginx/{{ item }}
docker_container_copy_into:
container: '{{ daemon_nginx_frontend }}'
path: '{{ remote_tmp_dir }}/{{ item }}'
container_path: '/etc/nginx/{{ item }}'
owner_id: 0
group_id: 0
loop:
- nginx.conf
register: can_copy_files
@ -94,7 +99,12 @@
provider: ownca
- name: Copy dynamic files into volume
command: docker cp {{ remote_tmp_dir }}/{{ item }} {{ daemon_nginx_frontend }}:/etc/nginx/{{ item }}
docker_container_copy_into:
container: '{{ daemon_nginx_frontend }}'
path: '{{ remote_tmp_dir }}/{{ item }}'
container_path: '/etc/nginx/{{ item }}'
owner_id: 0
group_id: 0
loop:
- ca.pem
- cert.pem

View File

@ -39,7 +39,12 @@
- nginx.htpasswd
- name: Copy static files into volume
command: docker cp {{ remote_tmp_dir }}/{{ item }} {{ docker_registry_container_name_frontend }}:/etc/nginx/{{ item }}
docker_container_copy_into:
container: '{{ docker_registry_container_name_frontend }}'
path: '{{ remote_tmp_dir }}/{{ item }}'
container_path: '/etc/nginx/{{ item }}'
owner_id: 0
group_id: 0
loop:
- nginx.conf
- nginx.htpasswd
@ -71,7 +76,12 @@
provider: selfsigned
- name: Copy dynamic files into volume
command: docker cp {{ remote_tmp_dir }}/{{ item }} {{ docker_registry_container_name_frontend }}:/etc/nginx/{{ item }}
docker_container_copy_into:
container: '{{ docker_registry_container_name_frontend }}'
path: '{{ remote_tmp_dir }}/{{ item }}'
container_path: '/etc/nginx/{{ item }}'
owner_id: 0
group_id: 0
loop:
- cert.pem
- cert.key

View File

@ -8,3 +8,4 @@ plugins/modules/current_container_facts.py validate-modules:return-syntax-error
plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax
plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax
plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax
plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin

View File

@ -8,3 +8,4 @@ plugins/modules/current_container_facts.py validate-modules:return-syntax-error
plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax
plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax
plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax
plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin

View File

@ -1,2 +1,3 @@
.azure-pipelines/scripts/publish-codecov.py replace-urlopen
plugins/modules/current_container_facts.py validate-modules:return-syntax-error
plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin

View File

@ -1 +1,2 @@
.azure-pipelines/scripts/publish-codecov.py replace-urlopen
plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin

View File

@ -1 +1,2 @@
.azure-pipelines/scripts/publish-codecov.py replace-urlopen
plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin

View File

@ -1 +1,2 @@
.azure-pipelines/scripts/publish-codecov.py replace-urlopen
plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin

View File

@ -7,3 +7,4 @@
plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax
plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax
plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax
plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin

View File

@ -0,0 +1,29 @@
# Copyright 2022 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import tarfile
from ansible_collections.community.docker.plugins.module_utils._scramble import (
scramble,
unscramble,
)
@pytest.mark.parametrize('plaintext, key, scrambled', [
    (u'', b'0', '=S='),
    (u'hello', b'\x00', '=S=aGVsbG8='),
    (u'hello', b'\x01', '=S=aWRtbW4='),
])
def test_scramble_unscramble(plaintext, key, scrambled):
    '''Round-trip test: scramble() produces the expected obfuscated text and unscramble() restores the plaintext.'''
    scrambled_ = scramble(plaintext, key)
    print('{0!r} == {1!r}'.format(scrambled_, scrambled))
    assert scrambled_ == scrambled

    plaintext_ = unscramble(scrambled, key)
    print('{0!r} == {1!r}'.format(plaintext_, plaintext))
    assert plaintext_ == plaintext

View File

@ -0,0 +1,78 @@
# Copyright 2022 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import tarfile
from ansible_collections.community.docker.plugins.module_utils.copy import (
_stream_generator_to_fileobj,
)
def _simple_generator(sequence):
for elt in sequence:
yield elt
@pytest.mark.parametrize('chunks, read_sizes', [
    (
        [
            (1, b'1'),
            (1, b'2'),
            (1, b'3'),
            (1, b'4'),
        ],
        [
            1,
            2,
            3,
        ]
    ),
    (
        [
            (1, b'123'),
            (1, b'456'),
            (1, b'789'),
        ],
        [
            1,
            4,
            2,
            2,
            2,
        ]
    ),
    (
        [
            (10 * 1024 * 1024, b'0'),
            (10 * 1024 * 1024, b'1'),
        ],
        [
            1024 * 1024 - 5,
            5 * 1024 * 1024 - 3,
            10 * 1024 * 1024 - 2,
            2 * 1024 * 1024 - 1,
            2 * 1024 * 1024 + 5 + 3 + 2 + 1,
        ]
    ),
])
def test__stream_generator_to_fileobj(chunks, read_sizes):
    '''Verify that _stream_generator_to_fileobj serves arbitrary read() sizes correctly.

    ``chunks`` is a list of (repeat_count, bytes) pairs describing the
    generator's output; ``read_sizes`` drives a sequence of reads whose
    concatenation must reproduce the generator's data in order.
    '''
    # Expand (count, data) pairs into the actual byte chunks the generator yields.
    chunks = [count * data for count, data in chunks]
    stream = _simple_generator(chunks)
    expected = b''.join(chunks)

    buffer = b''
    totally_read = 0
    f = _stream_generator_to_fileobj(stream)
    for read_size in read_sizes:
        chunk = f.read(read_size)
        # A read returns exactly read_size bytes unless the stream is exhausted first.
        assert len(chunk) == min(read_size, len(expected) - len(buffer))
        buffer += chunk
        totally_read += read_size
        # Data must arrive in order, matching the corresponding prefix.
        assert buffer == expected[:len(buffer)]
    assert min(totally_read, len(expected)) == len(buffer)