Mirror of https://github.com/ansible-collections/community.docker.git (synced 2025-12-13 10:32:06 +00:00)
Compare commits
696 Commits
.ansible-lint (new file, 30 lines)

@@ -0,0 +1,30 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

skip_list:
  # Ignore rules that make no sense:
  - galaxy[tags]
  - galaxy[version-incorrect]
  - meta-runtime[unsupported-version]
  - no-changed-when
  - sanity[cannot-ignore]  # some of the rules you cannot ignore actually MUST be ignored, like yamllint:unparsable-with-libyaml
  - yaml  # we're using yamllint ourselves
  - run-once[task]  # wtf???

  # To be checked and maybe fixed:
  - ignore-errors
  - key-order[task]
  - name[casing]
  - name[missing]
  - name[play]
  - name[template]
  - no-free-form
  - no-handler
  - risky-file-permissions
  - risky-shell-pipe
  - var-naming[no-reserved]
  - var-naming[no-role-prefix]
  - var-naming[pattern]
  - var-naming[read-only]
```
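As a usage note: ansible-lint reads a `.ansible-lint` file from the directory it is invoked in, so the skip list above applies automatically when linting from the collection root. A minimal sketch, not part of the change itself (the checkout path is an assumption):

```bash
# Minimal sketch — ansible-lint picks up .ansible-lint from the working directory.
pip install ansible-lint                  # assumes a recent ansible-lint release
cd ansible_collections/community/docker   # hypothetical checkout path
ansible-lint                              # rules listed in skip_list are not reported
```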
.azure-pipelines/README.md

@@ -1,3 +1,9 @@
```markdown
<!--
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-->

## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
```
.azure-pipelines/azure-pipelines.yml

@@ -1,3 +1,8 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

trigger:
  batch: true
  branches:
```

@@ -24,15 +29,13 @@ schedules:
```yaml
    always: true
    branches:
      include:
        - stable-*
        - stable-4

variables:
  - name: checkoutPath
    value: ansible_collections/community/docker
  - name: coverageBranches
    value: main
  - name: pipelinesCoverage
    value: coverage
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
```

@@ -41,7 +44,7 @@ variables:
```yaml
resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:3.0.0
      image: quay.io/ansible/azure-pipelines-test-container:7.0.0

pool: Standard
```

@@ -57,65 +60,41 @@ stages:
```yaml
          targets:
            - name: Sanity
              test: 'devel/sanity/1'
            - name: Sanity Extra  # Only on devel
              test: 'devel/sanity/extra'
            - name: Units
              test: 'devel/units/1'
  - stage: Ansible_2_13
    displayName: Sanity & Units 2.13
  - stage: Ansible_2_20
    displayName: Sanity & Units 2.20
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.13/sanity/1'
              test: '2.20/sanity/1'
            - name: Units
              test: '2.13/units/1'
  - stage: Ansible_2_12
    displayName: Sanity & Units 2.12
              test: '2.20/units/1'
  - stage: Ansible_2_19
    displayName: Sanity & Units 2.19
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.12/sanity/1'
              test: '2.19/sanity/1'
            - name: Units
              test: '2.12/units/1'
  - stage: Ansible_2_11
    displayName: Sanity & Units 2.11
              test: '2.19/units/1'
  - stage: Ansible_2_18
    displayName: Sanity & Units 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.11/sanity/1'
              test: '2.18/sanity/1'
            - name: Units
              test: '2.11/units/1'
  - stage: Ansible_2_10
    displayName: Sanity & Units 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.10/sanity/1'
            - name: Units
              test: '2.10/units/1'
  - stage: Ansible_2_9
    displayName: Sanity & Units 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.9/sanity/1'
            - name: Units
              test: '2.9/units/1'
              test: '2.18/units/1'

### Docker
  - stage: Docker_devel
```

@@ -126,97 +105,61 @@ stages:
```yaml
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: CentOS 7
              test: centos7
            - name: Fedora 36
              test: fedora36
            - name: Fedora 35
              test: fedora35
            - name: openSUSE 15
              test: opensuse15
            - name: Ubuntu 20.04
              test: ubuntu2004
            - name: Fedora 42
              test: fedora42
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Alpine 3
              test: alpine3
            - name: Ubuntu 24.04
              test: ubuntu2404
            - name: Alpine 3.22
              test: alpine322
          groups:
            - 4
            - 5
  - stage: Docker_2_13
    displayName: Docker 2.13
  - stage: Docker_2_20
    displayName: Docker 2.20
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.13/linux/{0}
          testFormat: 2.20/linux/{0}
          targets:
            - name: CentOS 7
              test: centos7
            - name: openSUSE 15 py2
              test: opensuse15py2
            - name: Alpine 3
              test: alpine3
            - name: Fedora 42
              test: fedora42
            - name: Alpine 3.22
              test: alpine322
          groups:
            - 4
            - 5
  - stage: Docker_2_12
    displayName: Docker 2.12
  - stage: Docker_2_19
    displayName: Docker 2.19
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.12/linux/{0}
          testFormat: 2.19/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 34
              test: fedora34
            - name: Ubuntu 18.04
              test: ubuntu1804
            - name: Fedora 41
              test: fedora41
            - name: Alpine 3.21
              test: alpine321
          groups:
            - 4
            - 5
  - stage: Docker_2_11
    displayName: Docker 2.11
  - stage: Docker_2_18
    displayName: Docker 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.11/linux/{0}
          testFormat: 2.18/linux/{0}
          targets:
            - name: Fedora 33
              test: fedora33
            - name: Alpine 3
              test: alpine3
          groups:
            - 4
            - 5
  - stage: Docker_2_10
    displayName: Docker 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/linux/{0}
          targets:
            - name: Fedora 32
              test: fedora32
            - name: Ubuntu 16.04
              test: ubuntu1604
          groups:
            - 4
            - 5
  - stage: Docker_2_9
    displayName: Docker 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/linux/{0}
          targets:
            - name: Fedora 31
              test: fedora31
            - name: Fedora 40
              test: fedora40
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Alpine 3.20
              test: alpine320
          groups:
            - 4
            - 5
```

@@ -230,12 +173,14 @@ stages:
```yaml
        parameters:
          testFormat: devel/linux-community/{0}
          targets:
            - name: Debian Bullseye
            - name: Debian 11 Bullseye
              test: debian-bullseye/3.9
            - name: Debian 12 Bookworm
              test: debian-bookworm/3.11
            - name: Debian 13 Trixie
              test: debian-13-trixie/3.13
            - name: ArchLinux
              test: archlinux/3.10
            - name: CentOS Stream 8
              test: centos-stream8/3.8
              test: archlinux/3.13
          groups:
            - 4
            - 5
```

@@ -247,95 +192,71 @@ stages:
```yaml
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: RHEL {0}
          testFormat: devel/rhel/{0}
          testFormat: devel/{0}
          targets:
            - test: '7.9'
            - test: '9.0-pypi-latest'
            - name: RHEL 10.0
              test: rhel/10.0
            - name: RHEL 9.6 with Docker SDK, urllib3, requests from sources
              test: rhel/9.6-dev-latest
            # For some reason, Ubuntu 24.04 is *extremely* slower than RHEL 9.6
            # - name: Ubuntu 24.04
            #   test: ubuntu/24.04
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_13
    displayName: Remote 2.13
  - stage: Remote_2_20
    displayName: Remote 2.20
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: RHEL {0}
          testFormat: 2.13/rhel/{0}
          testFormat: 2.20/{0}
          targets:
            - test: '8.5'
            - name: RHEL 9.6
              test: rhel/9.6
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_12
    displayName: Remote 2.12
  - stage: Remote_2_19
    displayName: Remote 2.19
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: RHEL {0}
          testFormat: 2.12/rhel/{0}
          testFormat: 2.19/{0}
          targets:
            - test: '8.4'
            - name: RHEL 9.5
              test: rhel/9.5
            - name: Ubuntu 22.04
              test: ubuntu/22.04
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_11
    displayName: Remote 2.11
  - stage: Remote_2_18
    displayName: Remote 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: RHEL {0}
          testFormat: 2.11/rhel/{0}
          testFormat: 2.18/{0}
          targets:
            - test: '8.3'
            - name: RHEL 9.4
              test: rhel/9.4
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_10
    displayName: Remote 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: RHEL {0}
          testFormat: 2.10/rhel/{0}
          targets:
            - test: '7.8'
          groups:
            - 1
            - 2
            - 3
            - 4
  - stage: Remote_2_9
    displayName: Remote 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: RHEL {0}
          testFormat: 2.9/rhel/{0}
          targets:
            - test: '8.2'
          groups:
            - 1
            - 2
            - 3
            - 4

## Finally
```

@@ -343,23 +264,17 @@ stages:
```yaml
    condition: succeededOrFailed()
    dependsOn:
      - Ansible_devel
      - Ansible_2_13
      - Ansible_2_12
      - Ansible_2_11
      - Ansible_2_10
      - Ansible_2_9
      - Ansible_2_20
      - Ansible_2_19
      - Ansible_2_18
      - Remote_devel
      - Remote_2_13
      - Remote_2_12
      - Remote_2_11
      - Remote_2_10
      - Remote_2_9
      - Remote_2_20
      - Remote_2_19
      - Remote_2_18
      - Docker_devel
      - Docker_2_13
      - Docker_2_12
      - Docker_2_11
      - Docker_2_10
      - Docker_2_9
      - Docker_2_20
      - Docker_2_19
      - Docker_2_18
      - Docker_community_devel
    jobs:
      - template: templates/coverage.yml
```
.azure-pipelines/scripts/aggregate-coverage.sh

@@ -1,6 +1,10 @@
```bash
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

agent_temp_directory="$1"
```

.azure-pipelines/scripts/combine-coverage.py

@@ -1,4 +1,8 @@
```python
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
```

.azure-pipelines/scripts/process-results.sh

@@ -1,6 +1,10 @@
```bash
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
```

.azure-pipelines/scripts/publish-codecov.py

@@ -1,4 +1,8 @@
```python
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Upload code coverage reports to codecov.io.
Multiple coverage files from multiple languages are accepted and aggregated after upload.
```

.azure-pipelines/scripts/report-coverage.sh

@@ -1,6 +1,10 @@
```bash
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"
```

.azure-pipelines/scripts/run-tests.sh

@@ -1,6 +1,10 @@
```bash
#!/usr/bin/env bash
# Configure the test environment and run the tests.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

entry_point="$1"
```

.azure-pipelines/scripts/time-command.py

@@ -1,4 +1,8 @@
```python
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
```
.azure-pipelines/templates/coverage.yml

@@ -1,3 +1,8 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
```

@@ -23,16 +28,6 @@ jobs:
```yaml
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - task: PublishCodeCoverageResults@1
        inputs:
          codeCoverageTool: Cobertura
          # Azure Pipelines only accepts a single coverage data file.
          # That means only Python or PowerShell coverage can be uploaded, but not both.
          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
          summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
        displayName: Publish to Azure Pipelines
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
```
.azure-pipelines/templates/matrix.yml

@@ -1,3 +1,8 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.
```

@@ -45,11 +50,11 @@ jobs:
```yaml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
            - ${{ each target in parameters.targets }}:
                - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
                  test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
            - ${{ each group in parameters.groups }}:
                - ${{ each target in parameters.targets }}:
                    - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                      test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
```
.azure-pipelines/templates/test.yml

@@ -1,3 +1,8 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided list of jobs to create one or more test jobs.
# It can be used directly if needed, or through the matrix template.
```

@@ -9,37 +14,37 @@ parameters:
```yaml
jobs:
  - ${{ each job in parameters.jobs }}:
      - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
        displayName: ${{ job.name }}
        container: default
        workspace:
          clean: all
        steps:
          - checkout: self
            fetchDepth: $(fetchDepth)
            path: $(checkoutPath)
          - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
            displayName: Run Tests
          - bash: .azure-pipelines/scripts/process-results.sh
            condition: succeededOrFailed()
            displayName: Process Results
          - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Aggregate Coverage Data
          - task: PublishTestResults@2
            condition: eq(variables.haveTestResults, 'true')
            inputs:
              testResultsFiles: "$(outputPath)/junit/*.xml"
            displayName: Publish Test Results
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveBotResults, 'true')
            displayName: Publish Bot Results
            inputs:
              targetPath: "$(outputPath)/bot/"
              artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Publish Coverage Data
            inputs:
              targetPath: "$(Agent.TempDirectory)/coverage/"
              artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
```
.flake8 (new file, 13 lines)

@@ -0,0 +1,13 @@
```ini
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

[flake8]
extend-ignore = E203, E402, F401
count = true
# TODO: decrease this to ~10
max-complexity = 60
# black's max-line-length is 89, but it doesn't touch long string literals.
# Since ansible-test's limit is 160, let's use that here.
max-line-length = 160
statistics = true
```
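flake8 reads the `[flake8]` section of this file when run from the repository root; a minimal local invocation might look like this (the target directories are assumptions):

```bash
# Minimal sketch — flake8 discovers .flake8 in the current directory.
pip install flake8
flake8 plugins/ tests/   # hypothetical targets; E203/E402/F401 ignored, 160-char lines allowed
```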
.git-blame-ignore-revs (new file, 8 lines)

@@ -0,0 +1,8 @@
```text
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Reformat YAML: https://github.com/ansible-collections/community.docker/pull/1071
2487d1a0bf4f2c79d3ab5a9e7d0f969432bf32a2
# Reformat with black and isort
d65d37e9e9a78e03a35643704b413121515ee39c
```
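Both hashes point at mass-reformatting commits; `.git-blame-ignore-revs` exists so `git blame` can skip them. For example:

```bash
# Skip the reformatting commits for a single blame run (example file path):
git blame --ignore-revs-file .git-blame-ignore-revs plugins/modules/docker_container.py

# Or configure the clone once so every `git blame` skips them:
git config blame.ignoreRevsFile .git-blame-ignore-revs
```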
.github/dependabot.yml (new file, 15 lines)

@@ -0,0 +1,15 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    groups:
      ci:
        patterns:
          - "*"
```
.github/patchback.yml

@@ -1,4 +1,8 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

backport_branch_prefix: patchback/backports/
backport_label_prefix: backport-
target_branch_prefix: stable-
```
.github/workflows/docker-images.yml (new file, 90 lines)

@@ -0,0 +1,90 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Helper Docker images for testing
'on':
  # Run CI against all pushes (direct commits, also merged PRs), Pull Requests
  push:
    branches:
      - main
    paths:
      - .github/workflows/docker-images.yml
      - tests/images/**
  pull_request:
    branches:
      - main
    paths:
      - .github/workflows/docker-images.yml
      - tests/images/**
  # Run CI once per day (at 03:00 UTC)
  schedule:
    - cron: '0 3 * * *'

env:
  CONTAINER_REGISTRY: ghcr.io/ansible-collections

jobs:
  build:
    name: Build image ${{ matrix.name }}:${{ matrix.tag }}
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - name: simple-1
            tag: tag
            tag-as-latest: true
          - name: simple-2
            tag: tag
            tag-as-latest: true
          - name: healthcheck
            tag: check
            tag-as-latest: true

    steps:
      - name: Check out repository
        uses: actions/checkout@v6
        with:
          persist-credentials: false

      - name: Install dependencies
        run: |
          sudo apt-get install podman buildah

      - name: Set up Go 1.22
        uses: actions/setup-go@v6
        with:
          go-version: '1.22'
          cache: false  # true (default) results in warnings since we don't use Go modules

      - name: Build ${{ matrix.name }} image
        run: |
          ./build.sh "${CONTAINER_REGISTRY}/${{ matrix.name }}:${{ matrix.tag }}"
        working-directory: tests/images/${{ matrix.name }}

      - name: Tag image as latest
        if: matrix.tag-as-latest && matrix.tag != 'latest'
        run: |
          podman tag "${CONTAINER_REGISTRY}/${{ matrix.name }}:${{ matrix.tag }}" "${CONTAINER_REGISTRY}/${{ matrix.name }}:latest"

      - name: Publish container image ${{ env.CONTAINER_REGISTRY }}/${{ matrix.name }}:${{ matrix.tag }}
        if: github.event_name != 'pull_request'
        uses: redhat-actions/push-to-registry@v2
        with:
          registry: ${{ env.CONTAINER_REGISTRY }}
          image: ${{ matrix.name }}
          tags: ${{ matrix.tag }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Publish container image ${{ env.CONTAINER_REGISTRY }}/${{ matrix.name }}:latest
        if: github.event_name != 'pull_request' && matrix.tag-as-latest && matrix.tag != 'latest'
        uses: redhat-actions/push-to-registry@v2
        with:
          registry: ${{ env.CONTAINER_REGISTRY }}
          image: ${{ matrix.name }}
          tags: latest
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
```
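Once pushed, the helper images are public on GHCR, so a test environment could pull them roughly like this (image names and tags taken from the build matrix above; their availability is an assumption, not confirmed by this change):

```bash
# Sketch of consuming the published helper images (names/tags from the matrix above):
podman pull ghcr.io/ansible-collections/simple-1:tag
podman pull ghcr.io/ansible-collections/healthcheck:check
podman pull ghcr.io/ansible-collections/simple-2:latest   # tag-as-latest also publishes :latest
```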
.github/workflows/docs-pr.yml

@@ -1,23 +1,61 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Collection Docs
concurrency:
  group: docs-${{ github.head_ref }}
  group: docs-pr-${{ github.head_ref }}
  cancel-in-progress: true
on:
'on':
  pull_request_target:
    types: [opened, synchronize, reopened, closed]

env:
  GHP_BASE_URL: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}

jobs:
  build-docs:
    permissions:
      contents: read
    name: Build Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@main
    with:
      collection-name: community.docker
      init-lenient: false
      init-fail-on-error: true
      squash-hierarchy: true
      init-project: Community.Docker Collection
      init-copyright: Community.Docker Contributors
      init-title: Community.Docker Collection Documentation
      init-html-short-title: Community.Docker Collection Docs
      init-extra-html-theme-options: |
        documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
      render-file-line: '> * `$<status>` [$<path_tail>](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/pr/${{ github.event.number }}/$<path_tail>)'
      extra-collections: community.library_inventory_filtering_v1

  publish-docs-gh-pages:
    # for now we won't run this on forks
    if: github.repository == 'ansible-collections/community.docker'
    permissions:
      contents: write
      pages: write
      id-token: write
    needs: [build-docs]
    name: Publish Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
    with:
      artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
      action: ${{ (github.event.action == 'closed' || needs.build-docs.outputs.changed != 'true') && 'teardown' || 'publish' }}
      publish-gh-pages-branch: true
    secrets:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  comment:
    permissions:
      pull-requests: write
    runs-on: ubuntu-latest
    needs: build-docs
    needs: [build-docs, publish-docs-gh-pages]
    name: PR comments
    steps:
      - name: PR comment
```

@@ -35,13 +73,20 @@ jobs:
```yaml
            Thank you for contribution!✨

            This PR has been merged and your docs changes will be incorporated when they are next published.
            This PR has been merged and the docs are now incorporated into `main`:
            ${{ env.GHP_BASE_URL }}/branch/main
          body: |
            ## Docs Build 📝

            Thank you for contribution!✨

            The docsite for **this PR** is available for download as an artifact from this run:
            The docs for **this PR** have been published here:
            ${{ env.GHP_BASE_URL }}/pr/${{ github.event.number }}

            You can compare to the docs for the `main` branch here:
            ${{ env.GHP_BASE_URL }}/branch/main

            The docsite for **this PR** is also available for download as an artifact from this run:
            ${{ needs.build-docs.outputs.artifact-url }}

            File changes:
```
.github/workflows/docs-push.yml (new file, 56 lines)

@@ -0,0 +1,56 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Collection Docs
concurrency:
  group: docs-push-${{ github.sha }}
  cancel-in-progress: true
'on':
  push:
    branches:
      - main
      - stable-*
    tags:
      - '*'
  # Run CI once per day (at 09:00 UTC)
  schedule:
    - cron: '0 9 * * *'
  # Allow manual trigger (for newer antsibull-docs, sphinx-ansible-theme, ... versions)
  workflow_dispatch:

jobs:
  build-docs:
    permissions:
      contents: read
    name: Build Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-push.yml@main
    with:
      collection-name: community.docker
      init-lenient: false
      init-fail-on-error: true
      squash-hierarchy: true
      init-project: Community.Docker Collection
      init-copyright: Community.Docker Contributors
      init-title: Community.Docker Collection Documentation
      init-html-short-title: Community.Docker Collection Docs
      init-extra-html-theme-options: |
        documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
      extra-collections: community.library_inventory_filtering_v1

  publish-docs-gh-pages:
    # for now we won't run this on forks
    if: github.repository == 'ansible-collections/community.docker'
    permissions:
      contents: write
      pages: write
      id-token: write
    needs: [build-docs]
    name: Publish Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
    with:
      artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
      publish-gh-pages-branch: true
    secrets:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/ee.yml (deleted file, 114 lines)

@@ -1,114 +0,0 @@
```yaml
---
name: execution environment
on:
  # Run CI against all pushes (direct commits, also merged PRs), Pull Requests
  push:
    branches:
      - main
      - stable-*
  pull_request:
  # Run CI once per day (at 04:30 UTC)
  # This ensures that even if there haven't been commits that we are still testing against latest version of ansible-builder
  schedule:
    - cron: '30 4 * * *'

env:
  NAMESPACE: community
  COLLECTION_NAME: docker

jobs:
  build:
    name: Build and test EE (Ⓐ${{ matrix.runner_tag }})
    strategy:
      matrix:
        runner_tag:
          - devel
          - stable-2.12-latest
          - stable-2.11-latest
          - stable-2.9-latest
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
        with:
          path: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}

      - name: Set up Python
        uses: actions/setup-python@v3
        with:
          python-version: '3.10'

      - name: Install ansible-builder and ansible-navigator
        run: pip install ansible-builder ansible-navigator

      - name: Verify requirements
        run: ansible-builder introspect --sanitize .

      - name: Make sure galaxy.yml has version entry
        run: >-
          python -c
          'import yaml ;
          f = open("galaxy.yml", "rb") ;
          data = yaml.safe_load(f) ;
          f.close() ;
          data["version"] = data.get("version") or "0.0.1" ;
          f = open("galaxy.yml", "wb") ;
          f.write(yaml.dump(data).encode("utf-8")) ;
          f.close() ;
          '
        working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}

      - name: Build collection
        run: |
          ansible-galaxy collection build --output-path ../../../
        working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}

      - name: Create files for building execution environment
        run: |
          COLLECTION_FILENAME="$(ls "${{ env.NAMESPACE }}-${{ env.COLLECTION_NAME }}"-*.tar.gz)"

          # EE config
          cat > execution-environment.yml <<EOF
          ---
          version: 1
          build_arg_defaults:
            EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:${{ matrix.runner_tag }}'
          dependencies:
            galaxy: requirements.yml
          EOF
          echo "::group::execution-environment.yml"
          cat execution-environment.yml
          echo "::endgroup::"

          # Requirements
          cat > requirements.yml <<EOF
          ---
          collections:
            - name: ${COLLECTION_FILENAME}
              type: file
          EOF
          echo "::group::requirements.yml"
          cat requirements.yml
          echo "::endgroup::"

      - name: Build image based on ${{ matrix.runner_tag }}
        run: |
          mkdir -p context/_build/
          cp "${{ env.NAMESPACE }}-${{ env.COLLECTION_NAME }}"-*.tar.gz context/_build/
          ansible-builder build -v 3 -t test-ee:latest --container-runtime=docker

      - name: Make /var/run/docker.sock accessible by everyone
        run: sudo chmod a+rw /var/run/docker.sock

      - name: Run basic tests
        run: >
          ansible-navigator run
          --mode stdout
          --pull-policy never
          --set-environment-variable ANSIBLE_PRIVATE_ROLE_VARS=true
          --container-engine docker
          --container-options=-v --container-options=/var/run/docker.sock:/var/run/docker.sock
          --execution-environment-image test-ee:latest
          -v
          all.yml
        working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}/tests/ee
```
.github/workflows/nox.yml (new file, 35 lines)

@@ -0,0 +1,35 @@
```yaml
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: nox
'on':
  push:
    branches:
      - main
      - stable-*
  pull_request:
  # Run CI once per day (at 09:00 UTC)
  schedule:
    - cron: '0 9 * * *'
  workflow_dispatch:

jobs:
  nox:
    uses: ansible-community/antsibull-nox/.github/workflows/reusable-nox-run.yml@main
    with:
      session-name: Run extra sanity tests
      change-detection-in-prs: true

  ansible-test:
    uses: ansible-community/antsibull-nox/.github/workflows/reusable-nox-matrix.yml@main
    with:
      change-detection-in-prs: true
      upload-codecov: true
      upload-codecov-pr: false
      upload-codecov-push: false
      upload-codecov-schedule: true
      max-ansible-core: "2.17"
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
```
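Since both jobs delegate to antsibull-nox's reusable workflows, the corresponding checks can also be run locally with nox; roughly like this (the session name is an assumption — it depends on the collection's noxfile):

```bash
# Rough local equivalent — assumes the collection ships a noxfile for antsibull-nox.
pip install nox
nox --list       # show available sessions, including the ones CI runs
nox -e lint      # hypothetical session name for the extra sanity tests
```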
.gitignore

@@ -1,5 +1,10 @@
```text
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

/tests/output/
/changelogs/.plugin-cache.yaml
/tests/integration/inventory

# Byte-compiled / optimized / DLL files
__pycache__/
```

@@ -130,3 +135,6 @@ dmypy.json
```text
# Pyre type checker
.pyre/

# PyCharm
.idea
```
.isort.cfg (new file, 7 lines)

@@ -0,0 +1,7 @@
```ini
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

[isort]
profile=black
lines_after_imports = 2
```
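Both isort and black read their settings from files in the repository root, so a local format pass is just the following (the target paths are assumptions):

```bash
# isort uses the black-compatible profile from .isort.cfg; run black alongside it
# so import ordering and code layout agree.
pip install isort black
isort plugins/ tests/   # hypothetical target directories
black plugins/ tests/
```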
.mypy.ini (new file, 27 lines)

@@ -0,0 +1,27 @@
```ini
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

[mypy]
check_untyped_defs = True
disallow_untyped_defs = True

# strict = True -- only try to enable once everything (including dependencies!) is typed
strict_equality = True
strict_bytes = True

warn_redundant_casts = True
# warn_return_any = True
warn_unreachable = True

[mypy-ansible.*]
# ansible-core has partial typing information
follow_untyped_imports = True

[mypy-docker.*]
# Docker SDK for Python has partial typing information
follow_untyped_imports = True

[mypy-jsondiff.*]
# jsondiff has no typing information
ignore_missing_imports = True
```
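mypy picks up `.mypy.ini` from the directory it is invoked in; a local type check would look roughly like this (the target path is an assumption):

```bash
# Minimal sketch — mypy reads .mypy.ini from the current directory.
pip install mypy
mypy plugins/   # hypothetical target; untyped defs fail per disallow_untyped_defs
```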
598
.pylintrc
Normal file
598
.pylintrc
Normal file
@ -0,0 +1,598 @@
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

[MAIN]

# Clear in-memory caches upon conclusion of linting. Useful if running pylint
# in a server-like mode.
clear-cache-post-run=no

# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=

# Specify a score threshold under which the program will exit with error.
fail-under=10

# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=0

# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.7

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

# In verbose mode, extra non-checker-related info will be displayed.
#verbose=


[BASIC]

# Naming style matching correct argument names.
argument-naming-style=snake_case

# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=

# Naming style matching correct attribute names.
attr-naming-style=snake_case

# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=

# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
          bar,
          baz,
          toto,
          tutu,
          tata

# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=

# Naming style matching correct class attribute names.
class-attribute-naming-style=any

# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=

# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE

# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=

# Naming style matching correct class names.
class-naming-style=PascalCase

# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=

# Naming style matching correct constant names.
const-naming-style=UPPER_CASE

# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# Naming style matching correct function names.
function-naming-style=snake_case

# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=

# Good variable names which should always be accepted, separated by a comma.
good-names=i,
           j,
           k,
           ex,
           Run,
           _

# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=

# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no

# Naming style matching correct inline iteration names.
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=

# Naming style matching correct method names.
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=

# Naming style matching correct module names.
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty

# Regular expression matching correct type alias names. If left empty, type
# alias names will be checked with the set naming style.
#typealias-rgx=

# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=

# Naming style matching correct variable names.
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=


[CLASSES]

# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
                      __new__,
                      setUp,
                      asyncSetUp,
                      __post_init__

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs


[DESIGN]

# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=

# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=

# Maximum number of arguments for function / method.
max-args=5

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5

# Maximum number of branches for function / method body.
max-branches=12

# Maximum number of locals for function / method body.
max-locals=15

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of positional arguments for function / method.
max-positional-arguments=5

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of return / yield for function / method body.
max-returns=6

# Maximum number of statements in function / method body.
max-statements=50

# Minimum number of public methods for a class (see R0903).
min-public-methods=2


[EXCEPTIONS]

# Exceptions that will emit a warning when caught.
overgeneral-exceptions=builtins.BaseException,builtins.Exception


[FORMAT]

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Maximum number of characters on a single line.
max-line-length=160

# Maximum number of lines in a module.
max-module-lines=1000

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no


[IMPORTS]

# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=

# Allow explicit reexports by alias from a package __init__.
allow-reexport-from-package=no

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=

# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=

# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=

# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant

# Couples of modules and preferred modules, separated by a comma.
preferred-modules=


[LOGGING]

# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old

# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
           CONTROL_FLOW,
           INFERENCE,
           INFERENCE_FAILURE,
           UNDEFINED

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
        bad-inline-option,
        deprecated-pragma,
        duplicate-code,
        file-ignored,
        import-outside-toplevel,
        missing-class-docstring,
        missing-function-docstring,
        missing-module-docstring,
        locally-disabled,
        suppressed-message,
        use-implicit-booleaness-not-comparison,
        use-implicit-booleaness-not-comparison-to-string,
        use-implicit-booleaness-not-comparison-to-zero,
        superfluous-parens,
        too-few-public-methods,
        too-many-ancestors,
        too-many-arguments,
        too-many-boolean-expressions,
        too-many-branches,
        too-many-function-args,
        too-many-instance-attributes,
        too-many-lines,
        too-many-locals,
        too-many-nested-blocks,
        too-many-positional-arguments,
        too-many-public-methods,
        too-many-return-statements,
        too-many-statements,
        ungrouped-imports,
        useless-parent-delegation,
        wrong-import-order,
        wrong-import-position,
        # To clean up:
        fixme,
        import-error,  # TODO figure out why pylint cannot find the module
        no-name-in-module,  # TODO figure out why pylint cannot find the module
        protected-access,
        subprocess-popen-preexec-fn,
        unexpected-keyword-arg,
        unused-argument,
        # Cannot remove yet due to inadequacy of rules
        inconsistent-return-statements,  # doesn't notice that fail_json() does not return
        # Buggy implementation in pylint:
        relative-beyond-top-level,  # TODO

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifiers separated by comma (,) or put this option
# multiple times (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=


[METHOD_ARGS]

# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
      XXX,
      TODO

# Regular expression of note tags to take in consideration.
notes-rgx=


[REFACTORING]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5

# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error

# Let 'consider-using-join' be raised when the separator to join on would be
# non-empty (resulting in expected fixes of the type: ``"- " + " -
# ".join(items)``)
suggest-join-with-non-empty-separator=yes


[REPORTS]

# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=

# Set the output format. Available formats are: text, parseable, colorized,
# json2 (improved json format), json (old json format) and msvs (visual
# studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=

# Tells whether to display a full report or only the messages.
reports=no

# Activate the evaluation score.
score=yes


[SIMILARITIES]

# Comments are removed from the similarity computation
ignore-comments=yes

# Docstrings are removed from the similarity computation
ignore-docstrings=yes

# Imports are removed from the similarity computation
ignore-imports=yes

# Signatures are removed from the similarity computation
ignore-signatures=yes

# Minimum lines number of a similarity.
min-similarity-lines=4


[SPELLING]

# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4

# Spelling dictionary name. No available dictionaries: you need to install
# both the python package and the system dependency for enchant to work.
spelling-dict=

# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no


[STRING]

# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no

# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no


[TYPECHECK]

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=

# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes

# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes

# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
                          not-async-context-manager,
                          not-context-manager,
                          attribute-defined-outside-init

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace

# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes

# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1

# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1

# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin

# List of decorators that change the signature of a decorated function.
signature-mutators=


[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes

# List of names allowed to shadow builtins
allowed-redefined-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
          _cb

# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_

# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_

# Tells whether we should check for unused import in __init__ files.
init-import=no

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
53 .yamllint Normal file
@@ -0,0 +1,53 @@
---
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

extends: default

ignore: |
  /changelogs/

rules:
  line-length:
    max: 300
    level: error
  document-start:
    present: true
  document-end: false
  truthy:
    level: error
    allowed-values:
      - 'true'
      - 'false'
  indentation:
    spaces: 2
    indent-sequences: true
  key-duplicates: enable
  trailing-spaces: enable
  new-line-at-end-of-file: disable
  hyphens:
    max-spaces-after: 1
  empty-lines:
    max: 2
    max-start: 0
    max-end: 0
  commas:
    max-spaces-before: 0
    min-spaces-after: 1
    max-spaces-after: 1
  colons:
    max-spaces-before: 0
    max-spaces-after: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 0
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true
  comments:
    min-spaces-from-content: 1
  comments-indentation: false
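Taken together, these rules pin down the project's YAML house style: an explicit `---` document start, booleans written only as `true`/`false`, two-space indentation with indented sequences, and no implicit or explicit octal literals. As a quick illustration (a hypothetical fragment, not a file from the repository), the following document passes this configuration, while spelling a boolean as `yes` or skipping the document-start marker would be flagged at error level:

    ---
    # Hypothetical keys, for illustration only.
    service:
      enabled: true       # 'yes' or 'True' would violate the truthy rule
      replicas: 3
      tags:
        - web             # two-space indentation, indented sequences
        - frontend
      file_mode: "0644"   # quoted string, since octal literals are forbidden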
54 .yamllint-docs Normal file
@@ -0,0 +1,54 @@
---
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

extends: default

ignore: |
  /changelogs/

rules:
  line-length:
    max: 160
    level: error
  document-start:
    present: false
  document-end:
    present: false
  truthy:
    level: error
    allowed-values:
      - 'true'
      - 'false'
  indentation:
    spaces: 2
    indent-sequences: true
  key-duplicates: enable
  trailing-spaces: enable
  new-line-at-end-of-file: disable
  hyphens:
    max-spaces-after: 1
  empty-lines:
    max: 2
    max-start: 0
    max-end: 0
  commas:
    max-spaces-before: 0
    min-spaces-after: 1
    max-spaces-after: 1
  colons:
    max-spaces-before: 0
    max-spaces-after: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 0
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true
  comments:
    min-spaces-from-content: 1
  comments-indentation: false
54 .yamllint-examples Normal file
@@ -0,0 +1,54 @@
---
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

extends: default

ignore: |
  /changelogs/

rules:
  line-length:
    max: 160
    level: error
  document-start:
    present: true
  document-end:
    present: false
  truthy:
    level: error
    allowed-values:
      - 'true'
      - 'false'
  indentation:
    spaces: 2
    indent-sequences: true
  key-duplicates: enable
  trailing-spaces: enable
  new-line-at-end-of-file: disable
  hyphens:
    max-spaces-after: 1
  empty-lines:
    max: 2
    max-start: 0
    max-end: 0
  commas:
    max-spaces-before: 0
    min-spaces-after: 1
    max-spaces-after: 1
  colons:
    max-spaces-before: 0
    max-spaces-after: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 0
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true
  comments:
    min-spaces-from-content: 1
  comments-indentation: false
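The examples variant above differs from the docs variant in exactly one rule: `document-start` is required rather than forbidden. Assuming (from the file names) that this configuration is applied to `EXAMPLES` blocks in plugin documentation, those blocks must open with `---`, as in this hypothetical snippet:

    ---
    - name: Retrieve information on a container (hypothetical example)
      community.docker.docker_container_info:
        name: mydata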
2107 CHANGELOG.md Normal file
File diff suppressed because it is too large
3 CHANGELOG.md.license Normal file
@@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project
1169 CHANGELOG.rst
File diff suppressed because it is too large
3 CHANGELOG.rst.license Normal file
@@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project
191 LICENSES/Apache-2.0.txt Normal file
@@ -0,0 +1,191 @@

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
1 LICENSES/GPL-3.0-or-later.txt Symbolic link
@@ -0,0 +1 @@
../COPYING
@@ -1,48 +0,0 @@
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------

1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.

2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;
All Rights Reserved" are retained in Python alone or in any derivative version
prepared by Licensee.

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.

4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.

8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
85 README.md
@@ -1,43 +1,95 @@
<!--
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-->

# Docker Community Collection

[Docs (latest)](https://docs.ansible.com/ansible/latest/collections/community/docker/)
[Docs (devel)](https://docs.ansible.com/ansible/devel/collections/community/docker/)
[Build status (Azure Pipelines)](https://dev.azure.com/ansible/community.docker/_build?definitionId=25)
[EOL CI (GitHub Actions)](https://github.com/ansible-collections/community.docker/actions)
[Code coverage (Codecov)](https://codecov.io/gh/ansible-collections/community.docker)
[REUSE status](https://api.reuse.software/info/github.com/ansible-collections/community.docker)

This repo contains the `community.docker` Ansible Collection. The collection includes many modules and plugins to work with Docker.

Please note that this collection does **not** support Windows targets. The connection plugins included in this collection support Windows targets on a best-effort basis, but we are not testing this in CI.

## Code of Conduct

We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.

If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.

## Communication

* Join the Ansible forum:
  * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. Please add appropriate tags if you start new discussions, for example the `docker`, `docker-compose`, or `docker-swarm` tags.
  * [Posts tagged with 'docker'](https://forum.ansible.com/tag/docker): subscribe to participate in Docker related conversations.
  * [Posts tagged with 'docker-compose'](https://forum.ansible.com/tag/docker-compose): subscribe to participate in Docker Compose related conversations.
  * [Posts tagged with 'docker-swarm'](https://forum.ansible.com/tag/docker-swarm): subscribe to participate in Docker Swarm related conversations.
  * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts.
  * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events.

* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes.

For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).

## Tested with Ansible

Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 and ansible-core 2.13 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.

Please note that support for Ansible 2.9 and ansible-base 2.10 has been deprecated and will be dropped from community.docker 3.0.0 on.
Tested with the current ansible-core 2.17, ansible-core 2.18, and ansible-core 2.19 releases, and the current development version of ansible-core. Ansible/ansible-base versions before 2.17.0 are not supported.

## External requirements

Most modules and plugins require the [Docker SDK for Python](https://pypi.org/project/docker/). For Python 2.6 support, use [the deprecated docker-py library](https://pypi.org/project/docker-py/) instead.
Some modules and plugins require the Docker CLI or other external programs. Some require the [Docker SDK for Python](https://pypi.org/project/docker/) and some use [requests](https://pypi.org/project/requests/) to directly communicate with the Docker daemon API. All modules and plugins require Python 2.7 or later. Python 2.6 is no longer supported; use community.docker 2.x.y if you need to use Python 2.6.

Please note that Python 2.6 support has been deprecated and will be dropped from community.docker 3.0.0 on.
Installing the Docker SDK for Python also installs the requirements for the modules and plugins that use `requests`. If you want to directly install the Python libraries instead of the SDK, you need the following ones:

Both libraries cannot be installed at the same time. If you accidentally did install them simultaneously, you have to uninstall *both* before re-installing one of them.
- [requests](https://pypi.org/project/requests/);
- [pywin32](https://pypi.org/project/pywin32/) when using named pipes on Windows with the Windows 32 API;
- [paramiko](https://pypi.org/project/paramiko/) when using SSH to connect to the Docker daemon with `use_ssh_client=false`;
- [pyOpenSSL](https://pypi.org/project/pyOpenSSL/) when using TLS to connect to the Docker daemon;
- [backports.ssl_match_hostname](https://pypi.org/project/backports.ssl_match_hostname/) when using TLS to connect to the Docker daemon on Python 2.

If you have Docker SDK for Python < 2.0.0 installed ([docker-py](https://pypi.org/project/docker-py/)), you can still use it for modules that support it, though we recommend uninstalling it and then installing [docker](https://pypi.org/project/docker/), the Docker SDK for Python >= 2.0.0. Note that both libraries cannot be installed at the same time. If you accidentally did install them simultaneously, you have to uninstall *both* before re-installing one of them.
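To make the requirements above concrete: on managed hosts, the Python dependencies can be installed with Ansible itself. The following is a minimal, hypothetical sketch using `ansible.builtin.pip` (the `docker_hosts` group name is an assumption; installing `docker` also pulls in `requests`, as noted above):

    ---
    - name: Install Python requirements for community.docker modules
      hosts: docker_hosts  # hypothetical inventory group
      tasks:
        - name: Install the Docker SDK for Python (also installs requests)
          ansible.builtin.pip:
            name: docker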
## Collection Documentation

Browsing the [**latest** collection documentation](https://docs.ansible.com/ansible/latest/collections/community/docker) will show docs for the _latest version released in the Ansible package_, not the latest version of the collection released on Galaxy.

Browsing the [**devel** collection documentation](https://docs.ansible.com/ansible/devel/collections/community/docker) shows docs for the _latest version released on Galaxy_.

We also separately publish [**latest commit** collection documentation](https://ansible-collections.github.io/community.docker/branch/main/) which shows docs for the _latest commit in the `main` branch_.

If you use the Ansible package and do not update collections independently, use **latest**. If you install or update this collection directly from Galaxy, use **devel**. If you are looking to contribute, use **latest commit**.

## Included content

* Connection plugins:
  - community.docker.docker: use Docker containers as remotes
  - community.docker.docker: use Docker containers as remotes using the Docker CLI program
  - community.docker.docker_api: use Docker containers as remotes using the Docker API
  - community.docker.nsenter: execute commands on the host running the controller container
* Inventory plugins:
  - community.docker.docker_containers: dynamic inventory plugin for Docker containers
  - community.docker.docker_machine: collect Docker machines as inventory
  - community.docker.docker_swarm: collect Docker Swarm nodes as inventory
* Modules:
  * Docker:
    - community.docker.docker_container: manage Docker containers
    - community.docker.docker_container_copy_into: copy a file into a Docker container
    - community.docker.docker_container_exec: run commands in Docker containers
    - community.docker.docker_container_info: retrieve information on Docker containers
    - community.docker.docker_host_info: retrieve information on the Docker daemon
    - community.docker.docker_image: manage Docker images
    - community.docker.docker_image_build: build Docker images using Docker buildx
    - community.docker.docker_image_export: export (archive) Docker images
    - community.docker.docker_image_info: retrieve information on Docker images
    - community.docker.docker_image_load: load Docker images from archives
    - community.docker.docker_image_pull: pull Docker images from registries
    - community.docker.docker_image_push: push Docker images to registries
    - community.docker.docker_image_remove: remove Docker images
    - community.docker.docker_image_tag: tag Docker images with new names and/or tags
    - community.docker.docker_login: log in and out to/from registries
    - community.docker.docker_network: manage Docker networks
    - community.docker.docker_network_info: retrieve information on Docker networks
@@ -46,7 +98,10 @@ Both libraries cannot be installed at the same time. If you accidentally did ins
    - community.docker.docker_volume: manage Docker volumes
    - community.docker.docker_volume_info: retrieve information on Docker volumes
  * Docker Compose:
    - community.docker.docker_compose: manage Docker Compose files
    - community.docker.docker_compose_v2: manage Docker Compose files (Docker compose CLI plugin)
    - community.docker.docker_compose_v2_exec: run command in a container of a Compose service
    - community.docker.docker_compose_v2_pull: pull a Docker compose project
    - community.docker.docker_compose_v2_run: run command in a new container of a Compose service
  * Docker Swarm:
    - community.docker.docker_config: manage configurations
    - community.docker.docker_node: manage Docker Swarm nodes
@@ -65,7 +120,7 @@ Both libraries cannot be installed at the same time. If you accidentally did ins

## Using this collection

Before using the General community collection, you need to install the collection with the `ansible-galaxy` CLI:
Before using the Docker community collection, you need to install the collection with the `ansible-galaxy` CLI:

    ansible-galaxy collection install community.docker
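Alternatively — and this is a common Ansible convention rather than anything specific to this collection — the collection can be listed in a `requirements.yml` file and installed with `ansible-galaxy collection install -r requirements.yml`:

    ---
    # requirements.yml
    collections:
      - name: community.docker
        # version: ">=4.0.0"  # hypothetical version pin; omit to get the latest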
@@ -86,7 +141,7 @@ You can find more information in the [developer guide for collections](https://d

## Release notes

See the [changelog](https://github.com/ansible-collections/community.docker/tree/main/CHANGELOG.rst).
See the [changelog](https://github.com/ansible-collections/community.docker/tree/main/CHANGELOG.md).

## More information

@@ -100,6 +155,10 @@ See the [changelog](https://github.com/ansible-collections/community.docker/tree

## Licensing

GNU General Public License v3.0 or later.
This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.

See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text.
See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.docker/blob/main/COPYING) for the full text.

Parts of the collection are licensed under the [Apache 2.0 license](https://github.com/ansible-collections/community.docker/blob/main/LICENSES/Apache-2.0.txt). This mostly applies to files vendored from the [Docker SDK for Python](https://github.com/docker/docker-py/).

All files have a machine readable `SPDX-License-Identifier:` comment denoting their respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/).
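As a quick taste of the modules listed under "Included content" above, here is a minimal, hypothetical playbook using community.docker.docker_container; the container and image names are placeholders, while `name`, `image`, and `state` are documented parameters of the module:

    ---
    - name: Run an example container
      hosts: localhost
      tasks:
        - name: Ensure an nginx container is running
          community.docker.docker_container:
            name: example-nginx  # placeholder container name
            image: nginx:latest  # placeholder image
            state: started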
11 REUSE.toml Normal file
@@ -0,0 +1,11 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version = 1

[[annotations]]
path = "changelogs/fragments/**"
precedence = "aggregate"
SPDX-FileCopyrightText = "Ansible Project"
SPDX-License-Identifier = "GPL-3.0-or-later"
265 antsibull-nox.toml Normal file
@@ -0,0 +1,265 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>
|
||||
|
||||
[collection_sources]
|
||||
"ansible.posix" = "git+https://github.com/ansible-collections/ansible.posix.git,main"
|
||||
"community.general" = "git+https://github.com/ansible-collections/community.general.git,main"
|
||||
"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main"
|
||||
"community.library_inventory_filtering_v1" = "git+https://github.com/ansible-collections/community.library_inventory_filtering.git,stable-1"
|
||||
|
||||
[vcs]
|
||||
vcs = "git"
|
||||
development_branch = "main"
|
||||
stable_branches = [ "stable-*" ]
|
||||
|
||||
[sessions]
|
||||
|
||||
[sessions.lint]
|
||||
run_isort = true
|
||||
isort_config = ".isort.cfg"
|
||||
run_black = true
|
||||
run_ruff_check = true
|
||||
ruff_check_config = "ruff.toml"
|
||||
run_flake8 = true
|
||||
flake8_config = ".flake8"
|
||||
run_pylint = true
|
||||
pylint_rcfile = ".pylintrc"
|
||||
run_yamllint = true
|
||||
yamllint_config = ".yamllint"
|
||||
yamllint_config_plugins = ".yamllint-docs"
|
||||
yamllint_config_plugins_examples = ".yamllint-examples"
|
||||
run_mypy = true
|
||||
mypy_ansible_core_package = "ansible-core>=2.19.0"
|
||||
mypy_config = ".mypy.ini"
|
||||
mypy_extra_deps = [
|
||||
"docker",
|
||||
"paramiko",
|
||||
"urllib3",
|
||||
"requests",
|
||||
"types-mock",
|
||||
"types-paramiko",
|
    "types-pywin32",
    "types-PyYAML",
    "types-requests",
]

[sessions.docs_check]
validate_collection_refs = "all"
codeblocks_restrict_types = [
    "ansible-output",
    "console",
    "yaml",
    "yaml+jinja",
]
codeblocks_restrict_type_exact_case = true
codeblocks_allow_without_type = false
codeblocks_allow_literal_blocks = false

[sessions.license_check]

[sessions.extra_checks]
run_no_unwanted_files = true
no_unwanted_files_module_extensions = [".py"]
no_unwanted_files_yaml_extensions = [".yml"]
run_action_groups = true
run_no_trailing_whitespace = true
run_avoid_characters = true

[[sessions.extra_checks.action_groups_config]]
name = "docker"
pattern = "^.*$"
exclusions = [
    "current_container_facts",
    "docker_context_info",
]
doc_fragment = "community.docker._attributes.actiongroup_docker"

[[sessions.extra_checks.avoid_character_group]]
name = "tab"
regex = "\\x09"
skip_directories = [
    "tests/images/",
]

[sessions.build_import_check]
run_galaxy_importer = true

[sessions.ansible_test_sanity]
include_devel = true

[sessions.ansible_test_units]
include_devel = true

[sessions.ansible_test_integration]
session_name_template = "ansible-test-integration-{ansible_core}{dash_docker_short}{dash_remote}{dash_python_version}{dash_target_dashized}"
display_name_template = "main+Ⓐ{ansible_core}{plus_docker_short}{plus_remote}{plus_py_python_version}{plus_target}{plus_force_docker_sdk_for_python_dev}{plus_force_docker_sdk_for_python_pypi}"
description_template = "Run main integration tests with ansible-core {ansible_core}{comma_docker_short}{comma_remote}{comma_py_python_version}{comma_target}{comma_force_docker_sdk_for_python_dev}{comma_force_docker_sdk_for_python_pypi}"

[sessions.ansible_test_integration.ansible_vars]
force_docker_sdk_for_python_dev = { type = "value", value = false, template_value = "" }
force_docker_sdk_for_python_pypi = { type = "value", value = false, template_value = "" }

##################################################################################################

# Ansible-core 2.17:

[[sessions.ansible_test_integration.groups]]
session_name = "ansible-test-integration-2.17"
description = "Meta session for running all ansible-test-integration-2.17-* sessions."

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.17"
target = [ "azp/4/", "azp/5/" ]
docker = [ "fedora39", "ubuntu2004", "alpine319" ]

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.17"
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
remote = [ "rhel/9.3" ]

# Ansible-core 2.18:

[[sessions.ansible_test_integration.groups]]
session_name = "ansible-test-integration-2.18"
description = "Meta session for running all ansible-test-integration-2.18-* sessions."

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.18"
target = [ "azp/4/", "azp/5/" ]
docker = [ "fedora40", "ubuntu2204", "alpine320" ]

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.18"
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
remote = [ "rhel/9.4" ]

# Ansible-core 2.19:

[[sessions.ansible_test_integration.groups]]
session_name = "ansible-test-integration-2.19"
description = "Meta session for running all ansible-test-integration-2.19-* sessions."

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.19"
target = [ "azp/4/", "azp/5/" ]
docker = [ "fedora41", "alpine321" ]

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.19"
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
remote = [ "rhel/9.5", "ubuntu/22.04" ]

# Ansible-core 2.20:

[[sessions.ansible_test_integration.groups]]
session_name = "ansible-test-integration-2.20"
description = "Meta session for running all ansible-test-integration-2.20-* sessions."

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.20"
target = [ "azp/4/", "azp/5/" ]
docker = [ "fedora42", "alpine322" ]

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "2.20"
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
remote = [ "rhel/9.6" ]

# Ansible-core devel:

[[sessions.ansible_test_integration.groups]]
session_name = "ansible-test-integration-devel"
description = "Meta session for running all ansible-test-integration-devel-* sessions."

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "devel"
target = [ "azp/4/", "azp/5/" ]
docker = [ "fedora42", "ubuntu2204", "ubuntu2404", "alpine322" ]

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "devel"
target = [ "azp/4/", "azp/5/" ]
python_version = "3.9"
docker = "quay.io/ansible-community/test-image:debian-bullseye"

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "devel"
target = [ "azp/4/", "azp/5/" ]
python_version = "3.11"
docker = "quay.io/ansible-community/test-image:debian-bookworm"

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "devel"
target = [ "azp/4/", "azp/5/" ]
python_version = "3.13"
docker = "quay.io/ansible-community/test-image:debian-13-trixie"

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "devel"
target = [ "azp/4/", "azp/5/" ]
python_version = "3.13"
docker = "quay.io/ansible-community/test-image:archlinux"

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "devel"
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
remote = [ "rhel/9.6" ]
ansible_vars = { force_docker_sdk_for_python_dev = { type = "value", value = true, template_value = "sdk-dev-latest" } }

[[sessions.ansible_test_integration.groups.sessions]]
ansible_core = "devel"
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
remote = [
    "rhel/10.0",
    # For some reason, Ubuntu 24.04 is *much* slower than RHEL 9.6
    # "ubuntu/24.04",
]

##################################################################################################

[sessions.ansible_lint]
ansible_lint_package = [
    "ansible-lint",
    "ansible-compat < 25.8.2",
]

[[sessions.ee_check.execution_environments]]
name = "devel-ubi-9"
description = "ansible-core devel @ RHEL UBI 9"
test_playbooks = ["tests/ee/all.yml"]
config.images.base_image.name = "docker.io/redhat/ubi9:latest"
config.dependencies.ansible_core.package_pip = "https://github.com/ansible/ansible/archive/devel.tar.gz"
config.dependencies.ansible_runner.package_pip = "ansible-runner"
config.dependencies.python_interpreter.package_system = "python3.12 python3.12-pip python3.12-wheel python3.12-cryptography"
config.dependencies.python_interpreter.python_path = "/usr/bin/python3.12"
runtime_environment = {"ANSIBLE_PRIVATE_ROLE_VARS" = "true"}
runtime_container_options = [
    # Mount Docker socket into the container so we can talk to Docker outside the container
    "-v",
    "/var/run/docker.sock:/var/run/docker.sock",
    # Need to be root so we can access /var/run/docker.sock, which usually isn't accessible by the user,
    # but only by the group the user is in (but that group membership isn't there in the container)
    "--user",
    "0",
]

[[sessions.ee_check.execution_environments]]
name = "2.17-rocky-9"
description = "ansible-core 2.17 @ Rocky Linux 9"
test_playbooks = ["tests/ee/all.yml"]
config.images.base_image.name = "quay.io/rockylinux/rockylinux:9"
config.dependencies.ansible_core.package_pip = "https://github.com/ansible/ansible/archive/stable-2.17.tar.gz"
config.dependencies.ansible_runner.package_pip = "ansible-runner"
config.dependencies.python_interpreter.package_system = "python3.11 python3.11-pip python3.11-wheel python3.11-cryptography"
config.dependencies.python_interpreter.python_path = "/usr/bin/python3.11"
runtime_environment = {"ANSIBLE_PRIVATE_ROLE_VARS" = "true"}
runtime_container_options = [
    # Mount Docker socket into the container so we can talk to Docker outside the container
    "-v",
    "/var/run/docker.sock:/var/run/docker.sock",
    # Need to be root so we can access /var/run/docker.sock, which usually isn't accessible by the user,
    # but only by the group the user is in (but that group membership isn't there in the container)
    "--user",
    "0",
]
File diff suppressed because it is too large.

changelogs/changelog.yaml.license (new file)
@@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project
@@ -1,29 +1,43 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
ignore_other_fragment_extensions: true
keep_fragments: false
mention_ancestor: true
new_plugins_after_name: removed_features
notesdir: fragments
output_formats:
  - md
  - rst
prelude_section_name: release_summary
prelude_section_title: Release Summary
sections:
  - - major_changes
    - Major Changes
  - - minor_changes
    - Minor Changes
  - - breaking_changes
    - Breaking Changes / Porting Guide
  - - deprecated_features
    - Deprecated Features
  - - removed_features
    - Removed Features (previously deprecated)
  - - security_fixes
    - Security Fixes
  - - bugfixes
    - Bugfixes
  - - known_issues
    - Known Issues
title: Docker Community Collection
trivial_section_name: trivial
use_fqcn: true
add_plugin_period: true
changelog_nice_yaml: true
changelog_sort: version
vcs: auto
docs/docsite/config.yml (new file)
@@ -0,0 +1,18 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# The following `.. envvar::` directives are defined in the extra docsite docs:
envvar_directives:
  - DOCKER_HOST
  - DOCKER_API_VERSION
  - DOCKER_TIMEOUT
  - DOCKER_CERT_PATH
  - DOCKER_SSL_VERSION
  - DOCKER_TLS
  - DOCKER_TLS_HOSTNAME
  - DOCKER_TLS_VERIFY

changelog:
  write_changelog: true
@@ -1,4 +1,8 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

sections:
  - title: Scenario Guide
    toctree:
@@ -1,10 +1,20 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

edit_on_github:
  repository: ansible-collections/community.docker
  branch: main
  path_prefix: ''

extra_links:
  - description: Ask for help (Docker)
    url: https://forum.ansible.com/tags/c/help/6/none/docker
  - description: Ask for help (Docker Compose)
    url: https://forum.ansible.com/tags/c/help/6/none/docker-compose
  - description: Ask for help (Docker Swarm)
    url: https://forum.ansible.com/tags/c/help/6/none/docker-swarm
  - description: Submit a bug report
    url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md
  - description: Request a feature
@@ -18,6 +28,16 @@ communication:
    - topic: General usage and support questions
      network: Libera
      channel: '#ansible'
  mailing_lists:
    - topic: Ansible Project List
      url: https://groups.google.com/g/ansible-project
  forums:
    - topic: "Ansible Forum: General usage and support questions"
      # The following URL directly points to the "Get Help" section
      url: https://forum.ansible.com/c/help/6/none
    - topic: "Ansible Forum: Discussions about Docker"
      # The following URL directly points to the "docker" tag
      url: https://forum.ansible.com/tag/docker
    - topic: "Ansible Forum: Discussions about Docker Compose"
      # The following URL directly points to the "docker-compose" tag
      url: https://forum.ansible.com/tag/docker-compose
    - topic: "Ansible Forum: Discussions about Docker Swarm"
      # The following URL directly points to the "docker-swarm" tag
      url: https://forum.ansible.com/tag/docker-swarm
@@ -1,9 +1,14 @@
..
  Copyright (c) Ansible Project
  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
  SPDX-License-Identifier: GPL-3.0-or-later

.. _ansible_collections.community.docker.docsite.scenario_guide:

Docker Guide
============

The `community.docker collection <https://galaxy.ansible.com/community/docker>`_ offers several modules and plugins for orchestrating Docker containers and Docker Swarm.
The `community.docker collection <https://galaxy.ansible.com/ui/repo/published/community/docker/>`_ offers several modules and plugins for orchestrating Docker containers and Docker Swarm.

.. contents::
   :local:
@@ -13,31 +18,23 @@ The `community.docker collection <https://galaxy.ansible.com/community/docker>`_

Requirements
------------

Most of the modules and plugins in community.docker require the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_. The SDK needs to be installed on the machines where the modules and plugins are executed, and for the Python version(s) with which the modules and plugins are executed. You can use the :ref:`community.general.python_requirements_info module <ansible_collections.community.general.python_requirements_info_module>` to make sure that the Docker SDK for Python is installed on the correct machine and for the Python version used by Ansible.
Most of the modules and plugins in community.docker require the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_. The SDK needs to be installed on the machines where the modules and plugins are executed, and for the Python version(s) with which the modules and plugins are executed. You can use the :ansplugin:`community.general.python_requirements_info module <community.general.python_requirements_info#module>` to make sure that the Docker SDK for Python is installed on the correct machine and for the Python version used by Ansible.
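
For example, a task like the following (a minimal sketch, assuming the community.general collection is installed; the registered variable name is a placeholder) can check whether the SDK is available to the Python interpreter used on the target:

.. code-block:: yaml

    - name: Check whether the Docker SDK for Python is installed
      community.general.python_requirements_info:
        dependencies:
          - docker
      register: docker_sdk_check
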
Note that plugins (inventory plugins and connection plugins) are always executed in the context of Ansible itself. If you use a plugin that requires the Docker SDK for Python, you need to install it on the machine running ``ansible`` or ``ansible-playbook`` and for the same Python interpreter used by Ansible. To see which Python is used, run ``ansible --version``.

You can install the Docker SDK for Python for Python 3.6 or later as follows:

.. code-block:: bash
.. code-block:: console

    $ pip install docker

For Python 2.7, you need to use a version between 2.0.0 and 4.4.4 since the Python package for Docker removed support for Python 2.7 in version 5.0.0. You can install the specific version of the Docker SDK for Python as follows:

.. code-block:: bash
.. code-block:: console

    $ pip install 'docker==4.4.4'

For Python 2.6, you need a version before 2.0.0. For these versions, the SDK was called ``docker-py``, so you need to install it as follows:

.. code-block:: bash

    $ pip install 'docker-py>=1.10.0'

Please install only one of ``docker`` or ``docker-py``. Installing both will result in a broken installation. If this happens, Ansible will detect it and inform you about it. If that happens, you must uninstall both and reinstall the correct version.

If in doubt, always install ``docker`` and never ``docker-py``.
Note that the Docker SDK for Python was called ``docker-py`` on PyPI before version 2.0.0. Please avoid installing this really old version, and make sure to not install both ``docker`` and ``docker-py``. Installing both will result in a broken installation. If this happens, Ansible will detect it and inform you about it. If that happens, you must uninstall both and reinstall the correct version. If in doubt, always install ``docker`` and never ``docker-py``.


Connecting to the Docker API
@@ -52,7 +49,7 @@ Parameters

Most plugins and modules can be configured by the following parameters:

docker_host
    The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix://var/run/docker.sock``. To connect to a remote host, provide the TCP connection string (for example: ``tcp://192.0.2.23:2376``). If TLS is used to encrypt the connection to the API, then the module will automatically replace 'tcp' in the connection URL with 'https'.
    The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix:///var/run/docker.sock``. To connect to a remote host, provide the TCP connection string (for example: ``tcp://192.0.2.23:2376``). If TLS is used to encrypt the connection to the API, then the module will automatically replace ``tcp`` in the connection URL with ``https``.

api_version
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the Docker SDK for Python installed.
@@ -66,7 +63,7 @@ Most plugins and modules can be configured by the following parameters:
validate_certs
    Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. Default is ``false``.

cacert_path
ca_path
    Use a CA certificate when performing server verification by providing the path to a CA certificate file.

cert_path
@@ -81,6 +78,58 @@ Most plugins and modules can be configured by the following parameters:
ssl_version
    Provide a valid SSL version number. The default value is determined by the Docker SDK for Python.

    This option is not available for the CLI based plugins. It is mainly needed for legacy systems and should be avoided.
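
As an illustration (a minimal sketch; the host, hostname, and certificate path below are placeholders), these parameters can also be set directly on a single task:

.. code-block:: yaml

    - name: Inspect a container on a remote TLS-protected daemon
      community.docker.docker_container_info:
        name: my-container
        docker_host: tcp://192.0.2.23:2376
        tls: true
        validate_certs: true
        tls_hostname: docker.example.com
        ca_path: /path/to/cacert.pem
      register: result
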
Module default group
....................

To avoid having to specify common parameters for all the modules in every task, you can use the ``community.docker.docker`` :ref:`module defaults group <module_defaults_groups>`, or its short name ``docker``.

.. note::

    Module default groups only work for modules, not for plugins (connection and inventory plugins).

The following example shows how the module default group can be used in a playbook:

.. code-block:: yaml+jinja

    ---
    - name: Pull image and start the container
      hosts: localhost
      gather_facts: false
      module_defaults:
        group/community.docker.docker:
          # Select Docker Daemon on other host
          docker_host: tcp://192.0.2.23:2376
          # Configure TLS
          tls: true
          validate_certs: true
          tls_hostname: docker.example.com
          ca_path: /path/to/cacert.pem
          # Increase timeout
          timeout: 120
      tasks:
        - name: Pull image
          community.docker.docker_image_pull:
            name: python
            tag: 3.12

        - name: Start container
          community.docker.docker_container:
            cleanup: true
            command: python --version
            detach: false
            image: python:3.12
            name: my-python-container
            output_logs: true
          register: output

        - name: Show output
          ansible.builtin.debug:
            msg: "{{ output.container.Output }}"

Here the two ``community.docker`` tasks will use the options set for the module defaults group.

Environment variables
.....................
@@ -89,27 +138,38 @@ You can also control how the plugins and modules connect to the Docker API by se

For plugins, they have to be set for the environment Ansible itself runs in. For modules, they have to be set for the environment the modules are executed in. For modules running on remote machines, the environment variables have to be set on that machine for the user that executes the modules.

DOCKER_HOST
    The URL or Unix socket path used to connect to the Docker API.
.. envvar:: DOCKER_HOST

DOCKER_API_VERSION
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
    by docker-py.
    The URL or Unix socket path used to connect to the Docker API.

DOCKER_TIMEOUT
    The maximum amount of time in seconds to wait on a response from the API.
.. envvar:: DOCKER_API_VERSION

DOCKER_CERT_PATH
    Path to the directory containing the client certificate, client key and CA certificate.
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
    by Docker SDK for Python.

DOCKER_SSL_VERSION
    Provide a valid SSL version number.
.. envvar:: DOCKER_TIMEOUT

DOCKER_TLS
    Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.
    The maximum amount of time in seconds to wait on a response from the API.

DOCKER_TLS_VERIFY
    Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
.. envvar:: DOCKER_CERT_PATH

    Path to the directory containing the client certificate, client key and CA certificate.

.. envvar:: DOCKER_SSL_VERSION

    Provide a valid SSL version number.

.. envvar:: DOCKER_TLS

    Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.

.. envvar:: DOCKER_TLS_HOSTNAME

    When verifying the authenticity of the Docker Host, uses this hostname to compare to the host's certificate.

.. envvar:: DOCKER_TLS_VERIFY

    Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
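
As a brief illustration (a sketch; the socket path is a placeholder), such environment variables can also be supplied per task with the ``environment`` keyword, which affects modules executed on that host:

.. code-block:: yaml

    - name: Gather facts about an alternative Docker daemon
      community.docker.docker_host_info:
      environment:
        DOCKER_HOST: unix:///var/run/docker-alt.sock

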
Plain Docker daemon: images, networks, volumes, and containers
@@ -118,70 +178,114 @@ Plain Docker daemon: images, networks, volumes, and containers

For working with a plain Docker daemon, that is without Swarm, there are connection plugins, an inventory plugin, and several modules available:

docker connection plugin
    The :ref:`community.docker.docker connection plugin <ansible_collections.community.docker.docker_connection>` uses the Docker CLI utility to connect to Docker containers and execute modules in them. It essentially wraps ``docker exec`` and ``docker cp``. This connection plugin is supported by the :ref:`ansible.posix.synchronize module <ansible_collections.ansible.posix.synchronize_module>`.
    The :ansplugin:`community.docker.docker connection plugin <community.docker.docker#connection>` uses the Docker CLI utility to connect to Docker containers and execute modules in them. It essentially wraps ``docker exec`` and ``docker cp``. This connection plugin is supported by the :ansplugin:`ansible.posix.synchronize module <ansible.posix.synchronize#module>`. See the example after this list.

docker_api connection plugin
    The :ref:`community.docker.docker_api connection plugin <ansible_collections.community.docker.docker_api_connection>` talks directly to the Docker daemon to connect to Docker containers and execute modules in them.
    The :ansplugin:`community.docker.docker_api connection plugin <community.docker.docker_api#connection>` talks directly to the Docker daemon to connect to Docker containers and execute modules in them.

docker_containers inventory plugin
    The :ref:`community.docker.docker_containers inventory plugin <ansible_collections.community.docker.docker_containers_inventory>` allows you to dynamically add Docker containers from a Docker Daemon to your Ansible inventory. See :ref:`dynamic_inventory` for details on dynamic inventories.
    The :ansplugin:`community.docker.docker_containers inventory plugin <community.docker.docker_containers#inventory>` allows you to dynamically add Docker containers from a Docker Daemon to your Ansible inventory. See :ref:`dynamic_inventory` for details on dynamic inventories. An example inventory file is shown after this list.

    The `docker inventory script <https://github.com/ansible-community/contrib-scripts/blob/main/inventory/docker.py>`_ is deprecated. Please use the inventory plugin instead. The inventory plugin has several compatibility options. If you need to collect Docker containers from multiple Docker daemons, you need to add every Docker daemon as an individual inventory source.

docker_host_info module
    The :ref:`community.docker.docker_host_info module <ansible_collections.community.docker.docker_host_info_module>` allows you to retrieve information on a Docker daemon, such as all containers, images, volumes, networks and so on.
    The :ansplugin:`community.docker.docker_host_info module <community.docker.docker_host_info#module>` allows you to retrieve information on a Docker daemon, such as all containers, images, volumes, networks and so on.

docker_login module
    The :ref:`community.docker.docker_login module <ansible_collections.community.docker.docker_login_module>` allows you to log in and out of a remote registry, such as Docker Hub or a private registry. It provides similar functionality to the ``docker login`` and ``docker logout`` CLI commands.
    The :ansplugin:`community.docker.docker_login module <community.docker.docker_login#module>` allows you to log in and out of a remote registry, such as Docker Hub or a private registry. It provides similar functionality to the ``docker login`` and ``docker logout`` CLI commands.

docker_prune module
    The :ref:`community.docker.docker_prune module <ansible_collections.community.docker.docker_prune_module>` allows you to prune no longer needed containers, images, volumes and so on. It provides similar functionality to the ``docker prune`` CLI command.
    The :ansplugin:`community.docker.docker_prune module <community.docker.docker_prune#module>` allows you to prune no longer needed containers, images, volumes and so on. It provides similar functionality to the ``docker prune`` CLI command.

docker_image module
    The :ref:`community.docker.docker_image module <ansible_collections.community.docker.docker_image_module>` provides full control over images, including: build, pull, push, tag and remove.
    The :ansplugin:`community.docker.docker_image module <community.docker.docker_image#module>` provides full control over images, including: build, pull, push, tag and remove.

docker_image_build
    The :ansplugin:`community.docker.docker_image_build module <community.docker.docker_image_build#module>` allows you to build a Docker image using Docker buildx.

docker_image_export module
    The :ansplugin:`community.docker.docker_image_export module <community.docker.docker_image_export#module>` allows you to export (archive) images.

docker_image_info module
    The :ref:`community.docker.docker_image_info module <ansible_collections.community.docker.docker_image_info_module>` allows you to list and inspect images.
    The :ansplugin:`community.docker.docker_image_info module <community.docker.docker_image_info#module>` allows you to list and inspect images.

docker_image_load
    The :ansplugin:`community.docker.docker_image_load module <community.docker.docker_image_load#module>` allows you to import one or multiple images from tarballs.

docker_image_pull
    The :ansplugin:`community.docker.docker_image_pull module <community.docker.docker_image_pull#module>` allows you to pull a Docker image from a registry.

docker_image_push
    The :ansplugin:`community.docker.docker_image_push module <community.docker.docker_image_push#module>` allows you to push a Docker image to a registry.

docker_image_remove
    The :ansplugin:`community.docker.docker_image_remove module <community.docker.docker_image_remove#module>` allows you to remove and/or untag a Docker image from the Docker daemon.

docker_image_tag
    The :ansplugin:`community.docker.docker_image_tag module <community.docker.docker_image_tag#module>` allows you to tag a Docker image with additional names and/or tags.

docker_network module
    The :ref:`community.docker.docker_network module <ansible_collections.community.docker.docker_network_module>` provides full control over Docker networks.
    The :ansplugin:`community.docker.docker_network module <community.docker.docker_network#module>` provides full control over Docker networks.

docker_network_info module
    The :ref:`community.docker.docker_network_info module <ansible_collections.community.docker.docker_network_info_module>` allows you to inspect Docker networks.
    The :ansplugin:`community.docker.docker_network_info module <community.docker.docker_network_info#module>` allows you to inspect Docker networks.

docker_volume_info module
    The :ref:`community.docker.docker_volume_info module <ansible_collections.community.docker.docker_volume_info_module>` allows you to inspect Docker volumes.
    The :ansplugin:`community.docker.docker_volume_info module <community.docker.docker_volume_info#module>` allows you to inspect Docker volumes.

docker_volume module
    The :ref:`community.docker.docker_volume module <ansible_collections.community.docker.docker_volume_module>` provides full control over Docker volumes.
    The :ansplugin:`community.docker.docker_volume module <community.docker.docker_volume#module>` provides full control over Docker volumes.

docker_container module
    The :ref:`community.docker.docker_container module <ansible_collections.community.docker.docker_container_module>` manages the container lifecycle by providing the ability to create, update, stop, start and destroy a Docker container.
    The :ansplugin:`community.docker.docker_container module <community.docker.docker_container#module>` manages the container lifecycle by providing the ability to create, update, stop, start and destroy a Docker container.

docker_container_copy_into
    The :ansplugin:`community.docker.docker_container_copy_into module <community.docker.docker_container_copy_into#module>` allows you to copy files from the control node into a container.

docker_container_exec
    The :ansplugin:`community.docker.docker_container_exec module <community.docker.docker_container_exec#module>` allows you to execute commands in a running container.

docker_container_info module
    The :ref:`community.docker.docker_container_info module <ansible_collections.community.docker.docker_container_info_module>` allows you to inspect a Docker container.
    The :ansplugin:`community.docker.docker_container_info module <community.docker.docker_container_info#module>` allows you to inspect a Docker container.

docker_plugin
    The :ansplugin:`community.docker.docker_plugin module <community.docker.docker_plugin#module>` allows you to manage Docker plugins.
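
As a hedged illustration of the inventory plugin (a minimal sketch; the file name is only a convention), an inventory file whose name ends in ``docker.yml`` or ``docker.yaml`` could look like this:

.. code-block:: yaml

    ---
    # docker.yml - dynamically adds containers from the local daemon to the inventory
    plugin: community.docker.docker_containers
    # Make the containers reachable through the Docker CLI based connection plugin
    connection_type: docker-cli

Hosts from this inventory are then reached through the ``community.docker.docker`` connection plugin, which can also be selected explicitly with ``ansible_connection: community.docker.docker`` on any host whose name matches an existing container.

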
Docker Compose
--------------

The :ref:`community.docker.docker_compose module <ansible_collections.community.docker.docker_compose_module>`
allows you to use your existing Docker compose files to orchestrate containers on a single Docker daemon or on Swarm.
Supports compose versions 1 and 2.
Docker Compose v2
.................

Next to Docker SDK for Python, you need to install `docker-compose <https://github.com/docker/compose>`_ on the remote machines to use the module.
There are several modules for working with Docker Compose projects:

community.docker.docker_compose_v2
    The :ansplugin:`community.docker.docker_compose_v2 module <community.docker.docker_compose_v2#module>` allows you to use your existing Docker Compose files to orchestrate containers on a single Docker daemon or on Swarm.

community.docker.docker_compose_v2_exec
    The :ansplugin:`community.docker.docker_compose_v2_exec module <community.docker.docker_compose_v2_exec#module>` allows you to run a command in a container of Docker Compose projects.

community.docker.docker_compose_v2_pull
    The :ansplugin:`community.docker.docker_compose_v2_pull module <community.docker.docker_compose_v2_pull#module>` allows you to pull Docker Compose projects.

community.docker.docker_compose_v2_run
    The :ansplugin:`community.docker.docker_compose_v2_run module <community.docker.docker_compose_v2_run#module>` allows you to run a command in a new container of a Docker Compose project.

These modules use the Docker CLI "compose" plugin (``docker compose``), and thus need access to the Docker CLI tool.
No further requirements besides the CLI tool and its Docker Compose plugin are needed.
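
For example (a sketch; the project path is a placeholder), bringing up the services of an existing Compose project could look like this:

.. code-block:: yaml

    - name: Bring up services defined in a Docker Compose project
      community.docker.docker_compose_v2:
        project_src: /opt/my-compose-project
        state: present

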
Docker Machine
--------------

The :ref:`community.docker.docker_machine inventory plugin <ansible_collections.community.docker.docker_machine_inventory>` allows you to dynamically add Docker Machine hosts to your Ansible inventory.
The :ansplugin:`community.docker.docker_machine inventory plugin <community.docker.docker_machine#inventory>` allows you to dynamically add Docker Machine hosts to your Ansible inventory.


Docker stack
------------
Docker Swarm stack
------------------

The :ref:`community.docker.docker_stack module <ansible_collections.community.docker.docker_stack_module>` allows you to control Docker stacks. Information on stacks can be retrieved by the :ref:`community.docker.docker_stack_info module <ansible_collections.community.docker.docker_stack_info_module>`, and information on stack tasks can be retrieved by the :ref:`community.docker.docker_stack_task_info module <ansible_collections.community.docker.docker_stack_task_info_module>`.
The :ansplugin:`community.docker.docker_stack module <community.docker.docker_stack#module>` allows you to control Docker Swarm stacks. Information on Swarm stacks can be retrieved by the :ansplugin:`community.docker.docker_stack_info module <community.docker.docker_stack_info#module>`, and information on Swarm stack tasks can be retrieved by the :ansplugin:`community.docker.docker_stack_task_info module <community.docker.docker_stack_task_info#module>`.


Docker Swarm
@@ -195,19 +299,19 @@ Swarm management

One inventory plugin and several modules are provided to manage Docker Swarms:

docker_swarm inventory plugin
    The :ref:`community.docker.docker_swarm inventory plugin <ansible_collections.community.docker.docker_swarm_inventory>` allows you to dynamically add all Docker Swarm nodes to your Ansible inventory.
    The :ansplugin:`community.docker.docker_swarm inventory plugin <community.docker.docker_swarm#inventory>` allows you to dynamically add all Docker Swarm nodes to your Ansible inventory.

docker_swarm module
    The :ref:`community.docker.docker_swarm module <ansible_collections.community.docker.docker_swarm_module>` allows you to globally configure Docker Swarm manager nodes to join and leave swarms, and to change the Docker Swarm configuration.
    The :ansplugin:`community.docker.docker_swarm module <community.docker.docker_swarm#module>` allows you to globally configure Docker Swarm manager nodes to join and leave swarms, and to change the Docker Swarm configuration.

docker_swarm_info module
    The :ref:`community.docker.docker_swarm_info module <ansible_collections.community.docker.docker_swarm_info_module>` allows you to retrieve information on Docker Swarm.
    The :ansplugin:`community.docker.docker_swarm_info module <community.docker.docker_swarm_info#module>` allows you to retrieve information on Docker Swarm.

docker_node module
    The :ref:`community.docker.docker_node module <ansible_collections.community.docker.docker_node_module>` allows you to manage Docker Swarm nodes.
    The :ansplugin:`community.docker.docker_node module <community.docker.docker_node#module>` allows you to manage Docker Swarm nodes.

docker_node_info module
    The :ref:`community.docker.docker_node_info module <ansible_collections.community.docker.docker_node_info_module>` allows you to retrieve information on Docker Swarm nodes.
    The :ansplugin:`community.docker.docker_node_info module <community.docker.docker_node_info#module>` allows you to retrieve information on Docker Swarm nodes.
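
For example (a minimal sketch; the advertise address is a placeholder), initializing a new Swarm on a manager node could look like this:

.. code-block:: yaml

    - name: Initialize a new Docker Swarm
      community.docker.docker_swarm:
        state: present
        advertise_addr: 192.0.2.10
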
Configuration management
........................
@@ -215,21 +319,12 @@

The community.docker collection offers modules to manage Docker Swarm configurations and secrets:

docker_config module
    The :ref:`community.docker.docker_config module <ansible_collections.community.docker.docker_config_module>` allows you to create and modify Docker Swarm configs.
    The :ansplugin:`community.docker.docker_config module <community.docker.docker_config#module>` allows you to create and modify Docker Swarm configs.

docker_secret module
    The :ref:`community.docker.docker_secret module <ansible_collections.community.docker.docker_secret_module>` allows you to create and modify Docker Swarm secrets.
    The :ansplugin:`community.docker.docker_secret module <community.docker.docker_secret#module>` allows you to create and modify Docker Swarm secrets.
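
For instance (a sketch; the secret name and value are placeholders), creating a Swarm secret could look like this:

.. code-block:: yaml

    - name: Create a Docker Swarm secret
      community.docker.docker_secret:
        name: my_app_password
        data: s3cr3t-value
        state: present
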
Swarm services
..............

Docker Swarm services can be created and updated with the :ref:`community.docker.docker_swarm_service module <ansible_collections.community.docker.docker_swarm_service_module>`, and information on them can be queried by the :ref:`community.docker.docker_swarm_service_info module <ansible_collections.community.docker.docker_swarm_service_info_module>`.


Helpful links
-------------

Still using Dockerfile to build images? Check out `ansible-bender <https://github.com/ansible-community/ansible-bender>`_, and start building images from your Ansible playbooks.

Use `Ansible Operator <https://learn.openshift.com/ansibleop/ansible-operator-overview/>`_ to launch your docker-compose file on `OpenShift <https://www.okd.io/>`_. Go from an app on your laptop to a fully scalable app in the cloud with Kubernetes in just a few moments.
Docker Swarm services can be created and updated with the :ansplugin:`community.docker.docker_swarm_service module <community.docker.docker_swarm_service#module>`, and information on them can be queried by the :ansplugin:`community.docker.docker_swarm_service_info module <community.docker.docker_swarm_service_info#module>`.
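
As a hedged example (a sketch; the service name, image, and replica count are placeholders), creating a Swarm service could look like this:

.. code-block:: yaml

    - name: Create a Swarm service running nginx
      community.docker.docker_swarm_service:
        name: web
        image: nginx:stable
        replicas: 2
        state: present
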
galaxy.yml
@@ -1,17 +1,27 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# See https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html

namespace: community
name: docker
version: 2.7.0
version: 5.1.0
readme: README.md
authors:
  - Ansible Docker Working Group
description: Modules and plugins for working with Docker
license_file: COPYING
license:
  - GPL-3.0-or-later
  - Apache-2.0
# license_file: COPYING
tags:
  - docker
dependencies:
  community.library_inventory_filtering_v1: '>=1.0.0'
repository: https://github.com/ansible-collections/community.docker
#documentation: https://github.com/ansible-collection-migration/community.REPO_NAME/tree/main/docs
documentation: https://docs.ansible.com/ansible/latest/collections/community/docker/
homepage: https://github.com/ansible-collections/community.docker
issues: https://github.com/ansible-collections/community.docker/issues
build_ignore:
@@ -0,0 +1,3 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -1,2 +1,16 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

docker
docker-compose
urllib3
requests
paramiko
pyyaml

# We assume that EEs are not based on Windows, and have Python >= 3.5.
# (ansible-builder does not support conditionals, it will simply add
# the following unconditionally to the requirements)
#
# pywin32 ; sys_platform == 'win32'
# backports.ssl-match-hostname ; python_version < '3.5'
@@ -1,4 +1,8 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version: 1
dependencies:
  python: meta/ee-requirements.txt
@@ -1,27 +1,51 @@
---
requires_ansible: '>=2.9.10'
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

requires_ansible: '>=2.17.0'
action_groups:
  docker:
    - docker_compose
    - docker_config
    - docker_container
    - docker_container_exec
    - docker_container_info
    - docker_host_info
    - docker_image
    - docker_image_info
    - docker_image_load
    - docker_login
    - docker_network
    - docker_network_info
    - docker_node
    - docker_node_info
    - docker_plugin
    - docker_prune
    - docker_secret
    - docker_swarm
    - docker_swarm_info
    - docker_swarm_service
    - docker_swarm_service_info
    - docker_volume
    - docker_volume_info
    - docker_compose_v2
    - docker_compose_v2_exec
    - docker_compose_v2_pull
    - docker_compose_v2_run
    - docker_config
    - docker_container
    - docker_container_copy_into
    - docker_container_exec
    - docker_container_info
    - docker_host_info
    - docker_image
    - docker_image_build
    - docker_image_export
    - docker_image_info
    - docker_image_load
    - docker_image_pull
    - docker_image_push
    - docker_image_remove
    - docker_image_tag
    - docker_login
    - docker_network
    - docker_network_info
    - docker_node
    - docker_node_info
    - docker_plugin
    - docker_prune
    - docker_secret
    - docker_stack
    - docker_stack_info
    - docker_stack_task_info
    - docker_swarm
    - docker_swarm_info
    - docker_swarm_service
    - docker_swarm_service_info
    - docker_volume
    - docker_volume_info

plugin_routing:
  modules:
    docker_compose:
      tombstone:
        removal_version: 4.0.0
        warning_text: This module uses docker-compose v1, which is End of Life since July 2022. Please migrate to community.docker.docker_compose_v2.
noxfile.py (new file)
@@ -0,0 +1,27 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

# /// script
# dependencies = ["nox>=2025.02.09", "antsibull-nox"]
# ///

import sys

import nox


try:
    import antsibull_nox
except ImportError:
    print("You need to install antsibull-nox in the same Python environment as nox.")
    sys.exit(1)


antsibull_nox.load_antsibull_nox_toml()


# Allow to run the noxfile with `python noxfile.py`, `pipx run noxfile.py`, or similar.
# Requires nox >= 2025.02.09
if __name__ == "__main__":
    nox.main()
plugins/action/docker_container_copy_into.py (new file)
@@ -0,0 +1,49 @@
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

import base64
import typing as t

from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash

from ansible_collections.community.docker.plugins.module_utils._scramble import (
    unscramble,
)


class ActionModule(ActionBase):
    # Set to True when transferring files to the remote
    TRANSFERS_FILES = False

    def run(
        self, tmp: str | None = None, task_vars: dict[str, t.Any] | None = None
    ) -> dict[str, t.Any]:
        self._supports_check_mode = True
        self._supports_async = True

        result = super().run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # pylint: disable-next=no-member
        max_file_size_for_diff: int = C.MAX_FILE_SIZE_FOR_DIFF  # type: ignore
        self._task.args["_max_file_size_for_diff"] = max_file_size_for_diff

        result = merge_hash(
            result,
            self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val),
        )

        if "diff" in result and result["diff"].get("scrambled_diff"):
            # Scrambling is not done for security, but to avoid no_log screwing up the diff
            diff = result["diff"]
            key = base64.b64decode(diff.pop("scrambled_diff"))
            for k in ("before", "after"):
                if k in diff:
                    diff[k] = unscramble(diff[k], key)

        return result
@@ -4,200 +4,250 @@
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from __future__ import annotations
DOCUMENTATION = '''
author:
  - Lorin Hochestein (!UNKNOWN)
  - Leendert Brouwer (!UNKNOWN)
name: docker
short_description: Run tasks in docker containers

DOCUMENTATION = r"""
author:
  - Lorin Hochestein (!UNKNOWN)
  - Leendert Brouwer (!UNKNOWN)
name: docker
short_description: Run tasks in docker containers
description:
  - Run commands or put/fetch files to an existing docker container.
  - Uses the Docker CLI to execute commands in the container. If you prefer to directly connect to the Docker daemon, use
    the P(community.docker.docker_api#connection) connection plugin.
options:
  remote_addr:
    description:
  - Run commands or put/fetch files to an existing docker container.
  - Uses the Docker CLI to execute commands in the container. If you prefer
    to directly connect to the Docker daemon, use the
    R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection)
    connection plugin.
options:
  remote_addr:
    description:
      - The name of the container you want to access.
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_docker_host
  remote_user:
    description:
      - The user to execute as inside the container.
      - If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
    vars:
      - name: ansible_user
      - name: ansible_docker_user
    ini:
      - section: defaults
        key: remote_user
    env:
      - name: ANSIBLE_REMOTE_USER
    cli:
      - name: user
    keyword:
      - name: remote_user
  docker_extra_args:
    description:
      - Extra arguments to pass to the docker command line.
    default: ''
    vars:
      - name: ansible_docker_extra_args
    ini:
      - section: docker_connection
        key: extra_cli_args
  container_timeout:
    default: 10
    description:
      - Controls how long we can wait to access reading output from the container once execution started.
    env:
      - name: ANSIBLE_TIMEOUT
      - name: ANSIBLE_DOCKER_TIMEOUT
        version_added: 2.2.0
    ini:
      - key: timeout
        section: defaults
      - key: timeout
        section: docker_connection
        version_added: 2.2.0
    vars:
      - name: ansible_docker_timeout
        version_added: 2.2.0
    cli:
      - name: timeout
    type: integer
'''
      - The name of the container you want to access.
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_docker_host
  remote_user:
    description:
      - The user to execute as inside the container.
      - If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
    vars:
      - name: ansible_user
      - name: ansible_docker_user
    ini:
      - section: defaults
        key: remote_user
    env:
      - name: ANSIBLE_REMOTE_USER
    cli:
      - name: user
    keyword:
      - name: remote_user
  docker_extra_args:
    description:
      - Extra arguments to pass to the docker command line.
    default: ''
    vars:
      - name: ansible_docker_extra_args
    ini:
      - section: docker_connection
        key: extra_cli_args
  container_timeout:
    default: 10
    description:
      - Controls how long we can wait to access reading output from the container once execution started.
    env:
      - name: ANSIBLE_TIMEOUT
      - name: ANSIBLE_DOCKER_TIMEOUT
        version_added: 2.2.0
    ini:
      - key: timeout
        section: defaults
      - key: timeout
        section: docker_connection
        version_added: 2.2.0
    vars:
      - name: ansible_docker_timeout
        version_added: 2.2.0
    cli:
      - name: timeout
    type: integer
  extra_env:
    description:
      - Provide extra environment variables to set when running commands in the Docker container.
      - This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
        manager.
    vars:
      - name: ansible_docker_extra_env
    type: dict
    version_added: 3.12.0
  working_dir:
    description:
      - The directory inside the container to run commands in.
      - Requires Docker CLI version 18.06 or later.
    env:
      - name: ANSIBLE_DOCKER_WORKING_DIR
    ini:
      - key: working_dir
        section: docker_connection
    vars:
      - name: ansible_docker_working_dir
    type: string
    version_added: 3.12.0
  privileged:
    description:
      - Whether commands should be run with extended privileges.
      - B(Note) that this allows command to potentially break out of the container. Use with care!
    env:
      - name: ANSIBLE_DOCKER_PRIVILEGED
    ini:
      - key: privileged
        section: docker_connection
    vars:
      - name: ansible_docker_privileged
    type: boolean
    default: false
    version_added: 3.12.0
"""
import fcntl
import os
import os.path
import subprocess
import re
import selectors
import subprocess
import typing as t
from shlex import quote

from ansible.compat import selectors
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six.moves import shlex_quote
from ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.connection import BUFSIZE, ConnectionBase
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)


display = Display()
class Connection(ConnectionBase):
|
||||
''' Local docker based connections '''
|
||||
"""Local docker based connections"""
|
||||
|
||||
transport = 'community.docker.docker'
|
||||
transport = "community.docker.docker"
|
||||
has_pipelining = True
|
||||
|
||||
def __init__(self, play_context, new_stdin, *args, **kwargs):
|
||||
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
||||
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
# Note: docker supports running as non-root in some configurations.
|
||||
# (For instance, setting the UNIX socket file to be readable and
|
||||
# writable by a specific UNIX group and then putting users into that
|
||||
# group). Therefore we don't check that the user is root when using
|
||||
# group). Therefore we do not check that the user is root when using
|
||||
# this connection. But if the user is getting a permission denied
|
||||
# error it probably means that docker on their system is only
|
||||
# configured to be connected to by root and they are not running as
|
||||
# root.
|
||||
|
||||
self._docker_args = []
|
||||
self._container_user_cache = {}
|
||||
self._version = None
|
||||
self._docker_args: list[bytes | str] = []
|
||||
self._container_user_cache: dict[str, str | None] = {}
|
||||
self._version: str | None = None
|
||||
self.remote_user: str | None = None
|
||||
self.timeout: int | float | None = None
|
||||
|
||||
# Windows uses Powershell modules
|
||||
if getattr(self._shell, "_IS_WINDOWS", False):
|
||||
self.module_implementation_preferences = ('.ps1', '.exe', '')
|
||||
self.module_implementation_preferences = (".ps1", ".exe", "")
|
||||
|
||||
if 'docker_command' in kwargs:
|
||||
self.docker_cmd = kwargs['docker_command']
|
||||
if "docker_command" in kwargs:
|
||||
self.docker_cmd = kwargs["docker_command"]
|
||||
else:
|
||||
try:
|
||||
self.docker_cmd = get_bin_path('docker')
|
||||
except ValueError:
|
||||
raise AnsibleError("docker command not found in PATH")
|
||||
self.docker_cmd = get_bin_path("docker")
|
||||
except ValueError as exc:
|
||||
raise AnsibleError("docker command not found in PATH") from exc
|
||||
|
||||
@staticmethod
|
||||
def _sanitize_version(version):
|
||||
version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
|
||||
version = re.sub(u'^v', u'', version)
|
||||
def _sanitize_version(version: str) -> str:
|
||||
version = re.sub("[^0-9a-zA-Z.]", "", version)
|
||||
version = re.sub("^v", "", version)
|
||||
return version
|
||||
|
||||
- def _old_docker_version(self):
+ def _old_docker_version(self) -> tuple[list[str], str, bytes, int]:
cmd_args = self._docker_args

- old_version_subcommand = ['version']
+ old_version_subcommand = ["version"]

old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
- p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- cmd_output, err = p.communicate()
+ with subprocess.Popen(
+ old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ ) as p:
+ cmd_output, err = p.communicate()

- return old_docker_cmd, to_native(cmd_output), err, p.returncode
+ return old_docker_cmd, to_text(cmd_output), err, p.returncode

- def _new_docker_version(self):
+ def _new_docker_version(self) -> tuple[list[str], str, bytes, int]:
# no result yet, must be newer Docker version
cmd_args = self._docker_args

- new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
+ new_version_subcommand = ["version", "--format", "'{{.Server.Version}}'"]

new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
- p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- cmd_output, err = p.communicate()
- return new_docker_cmd, to_native(cmd_output), err, p.returncode

- def _get_docker_version(self):
+ with subprocess.Popen(
+ new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ ) as p:
+ cmd_output, err = p.communicate()
+ return new_docker_cmd, to_text(cmd_output), err, p.returncode

+ def _get_docker_version(self) -> str:
cmd, cmd_output, err, returncode = self._old_docker_version()
if returncode == 0:
- for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
- if line.startswith(u'Server version:'):  # old docker versions
+ for line in to_text(cmd_output, errors="surrogate_or_strict").split("\n"):
+ if line.startswith("Server version:"):  # old docker versions
return self._sanitize_version(line.split()[2])

cmd, cmd_output, err, returncode = self._new_docker_version()
if returncode:
- raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
+ raise AnsibleError(
+ f"Docker version check ({to_text(cmd)}) failed: {to_text(err)}"
+ )

- return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
+ return self._sanitize_version(to_text(cmd_output, errors="surrogate_or_strict"))

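For illustration, a standalone sketch of the newer-CLI probe the helpers above perform, using the Go template output of docker version (the docker binary on PATH is an assumption):

    import subprocess

    def docker_server_version(docker_cmd: str = "docker") -> str:
        # Ask the CLI for the server version via the Go template format.
        with subprocess.Popen(
            [docker_cmd, "version", "--format", "{{.Server.Version}}"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        ) as p:
            out, err = p.communicate()
        if p.returncode != 0:
            raise RuntimeError(f"docker version failed: {err.decode(errors='replace')}")
        return out.decode().strip()
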
- def _get_docker_remote_user(self):
- """ Get the default user configured in the docker container """
- container = self.get_option('remote_addr')
+ def _get_docker_remote_user(self) -> str | None:
+ """Get the default user configured in the docker container"""
+ container = self.get_option("remote_addr")
if container in self._container_user_cache:
return self._container_user_cache[container]
- p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', container],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ with subprocess.Popen(
+ [self.docker_cmd, "inspect", "--format", "{{.Config.User}}", container],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ ) as p:
+ out_b, err_b = p.communicate()
+ out = to_text(out_b, errors="surrogate_or_strict")

- out, err = p.communicate()
- out = to_text(out, errors='surrogate_or_strict')

- if p.returncode != 0:
- display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
- self._container_user_cache[container] = None
- return None
+ if p.returncode != 0:
+ display.warning(
+ f"unable to retrieve default user from docker container: {out} {to_text(err_b)}"
+ )
+ self._container_user_cache[container] = None
+ return None

# The default exec user is root, unless it was changed in the Dockerfile with USER
- user = out.strip() or u'root'
+ user = out.strip() or "root"
self._container_user_cache[container] = user
return user

- def _build_exec_cmd(self, cmd):
- """ Build the local docker exec command to run cmd on remote_host
+ def _build_exec_cmd(self, cmd: list[bytes | str]) -> list[bytes | str]:
+ """Build the local docker exec command to run cmd on remote_host

- If remote_user is available and is supported by the docker
- version we are using, it will be provided to docker exec.
+ If remote_user is available and is supported by the docker
+ version we are using, it will be provided to docker exec.
"""

local_cmd = [self.docker_cmd]
@@ -205,247 +255,371 @@ class Connection(ConnectionBase):
if self._docker_args:
local_cmd += self._docker_args

- local_cmd += [b'exec']
+ local_cmd += [b"exec"]

if self.remote_user is not None:
- local_cmd += [b'-u', self.remote_user]
+ local_cmd += [b"-u", self.remote_user]

+ if self.get_option("extra_env"):
+ for k, v in self.get_option("extra_env").items():
+ for val, what in ((k, "Key"), (v, "Value")):
+ if not isinstance(val, str):
+ raise AnsibleConnectionFailure(
+ f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted when directly specified "
+ "in YAML, or explicitly converted to strings when the option is templated. "
+ f"{what}: {val!r}"
+ )
+ local_cmd += [
+ b"-e",
+ b"%s=%s"
+ % (
+ to_bytes(k, errors="surrogate_or_strict"),
+ to_bytes(v, errors="surrogate_or_strict"),
+ ),
+ ]

+ if self.get_option("working_dir") is not None:
+ local_cmd += [
+ b"-w",
+ to_bytes(self.get_option("working_dir"), errors="surrogate_or_strict"),
+ ]
+ if self.docker_version != "dev" and LooseVersion(
+ self.docker_version
+ ) < LooseVersion("18.06"):
+ # https://github.com/docker/cli/pull/732, first appeared in release 18.06.0
+ raise AnsibleConnectionFailure(
+ f"Providing the working directory requires Docker CLI version 18.06 or newer. You have Docker CLI version {self.docker_version}."
+ )

+ if self.get_option("privileged"):
+ local_cmd += [b"--privileged"]

# -i is needed to keep stdin open which allows pipelining to work
- local_cmd += [b'-i', self.get_option('remote_addr')] + cmd
+ local_cmd += [b"-i", self.get_option("remote_addr")] + cmd

return local_cmd

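For illustration, the kind of argv the builder above typically returns; the container name, user, and command below are made up, and the mix of str and bytes mirrors how the plugin assembles the list:

    # Hypothetical result of _build_exec_cmd([b"/bin/sh", b"-c", b"echo hi"])
    # with remote_user "app" and remote_addr "web-1" (docker path assumed):
    local_cmd = [
        "/usr/bin/docker",          # self.docker_cmd
        b"exec",
        b"-u", "app",               # only if remote_user is set
        b"-i", "web-1",             # -i keeps stdin open for pipelining
        b"/bin/sh", b"-c", b"echo hi",
    ]
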
- def _set_docker_args(self):
+ def _set_docker_args(self) -> None:
# TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
# docker arguments
del self._docker_args[:]
- extra_args = self.get_option('docker_extra_args') or getattr(self._play_context, 'docker_extra_args', '')
+ extra_args = self.get_option("docker_extra_args") or getattr(
+ self._play_context, "docker_extra_args", ""
+ )
if extra_args:
- self._docker_args += extra_args.split(' ')
+ self._docker_args += extra_args.split(" ")

- def _set_conn_data(self):
-
- ''' initialize for the connection, cannot do only in init since all data is not ready at that point '''
+ def _set_conn_data(self) -> None:
+ """initialize for the connection, cannot do only in init since all data is not ready at that point"""

self._set_docker_args()

- self.remote_user = self.get_option('remote_user')
+ self.remote_user = self.get_option("remote_user")
if self.remote_user is None and self._play_context.remote_user is not None:
self.remote_user = self._play_context.remote_user

# timeout, use unless default and pc is different, backwards compat
- self.timeout = self.get_option('container_timeout')
+ self.timeout = self.get_option("container_timeout")
if self.timeout == 10 and self.timeout != self._play_context.timeout:
self.timeout = self._play_context.timeout

@property
- def docker_version(self):
+ def docker_version(self) -> str:
if not self._version:
self._set_docker_args()

self._version = self._get_docker_version()
- if self._version == u'dev':
- display.warning(u'Docker version number is "dev". Will assume latest version.')
- if self._version != u'dev' and LooseVersion(self._version) < LooseVersion(u'1.3'):
- raise AnsibleError('docker connection type requires docker 1.3 or higher')
+ if self._version == "dev":
+ display.warning(
+ 'Docker version number is "dev". Will assume latest version.'
+ )
+ if self._version != "dev" and LooseVersion(self._version) < LooseVersion(
+ "1.3"
+ ):
+ raise AnsibleError(
+ "docker connection type requires docker 1.3 or higher"
+ )
return self._version

- def _get_actual_user(self):
+ def _get_actual_user(self) -> str | None:
if self.remote_user is not None:
# An explicit user is provided
- if self.docker_version == u'dev' or LooseVersion(self.docker_version) >= LooseVersion(u'1.7'):
+ if self.docker_version == "dev" or LooseVersion(
+ self.docker_version
+ ) >= LooseVersion("1.7"):
# Support for specifying the exec user was added in docker 1.7
return self.remote_user
- else:
- self.remote_user = None
- actual_user = self._get_docker_remote_user()
- if actual_user != self.get_option('remote_user'):
- display.warning(u'docker {0} does not support remote_user, using container default: {1}'
- .format(self.docker_version, self.actual_user or u'?'))
- return actual_user
- elif self._display.verbosity > 2:
- # Since we're not setting the actual_user, look it up so we have it for logging later
+ self.remote_user = None
+ actual_user = self._get_docker_remote_user()
+ if actual_user != self.get_option("remote_user"):
+ display.warning(
+ f"docker {self.docker_version} does not support remote_user, using container default: {actual_user or '?'}"
+ )
+ return actual_user
+ if self._display.verbosity > 2:
+ # Since we are not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
- # This saves overhead from calling into docker when we don't need to.
+ # This saves overhead from calling into docker when we do not need to.
return self._get_docker_remote_user()
- else:
- return None
+ return None

- def _connect(self, port=None):
- """ Connect to the container. Nothing to do """
- super(Connection, self)._connect()
+ def _connect(self) -> t.Self:
+ """Connect to the container. Nothing to do"""
+ super()._connect()  # type: ignore[safe-super]
if not self._connected:
self._set_conn_data()
actual_user = self._get_actual_user()
- display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
- actual_user or u'?'), host=self.get_option('remote_addr')
+ display.vvv(
+ f"ESTABLISH DOCKER CONNECTION FOR USER: {actual_user or '?'}",
+ host=self.get_option("remote_addr"),
)
self._connected = True
+ return self

- def exec_command(self, cmd, in_data=None, sudoable=False):
- """ Run a command on the docker host """
+ def exec_command(
+ self, cmd: str, in_data: bytes | None = None, sudoable: bool = False
+ ) -> tuple[int, bytes, bytes]:
+ """Run a command on the docker host"""

self._set_conn_data()

- super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+ super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

- local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
+ local_cmd = self._build_exec_cmd([self._play_context.executable, "-c", cmd])

- display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self.get_option('remote_addr'))
+ display.vvv(f"EXEC {to_text(local_cmd)}", host=self.get_option("remote_addr"))
display.debug("opening command with Popen()")

- local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd]

- p = subprocess.Popen(
+ with subprocess.Popen(
local_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
- )
- display.debug("done running command with Popen()")
+ ) as p:
+ assert p.stdin is not None
+ assert p.stdout is not None
+ assert p.stderr is not None
+ display.debug("done running command with Popen()")

- if self.become and self.become.expect_prompt() and sudoable:
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- selector = selectors.DefaultSelector()
- selector.register(p.stdout, selectors.EVENT_READ)
- selector.register(p.stderr, selectors.EVENT_READ)
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(
+ p.stdout,
+ fcntl.F_SETFL,
+ fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK,
+ )
+ fcntl.fcntl(
+ p.stderr,
+ fcntl.F_SETFL,
+ fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK,
+ )
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)

- become_output = b''
- try:
- while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
- events = selector.select(self.timeout)
- if not events:
- stdout, stderr = p.communicate()
- raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+ become_output = b""
+ try:
+ while not self.become.check_success(
+ become_output
+ ) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError(
+ "timeout waiting for privilege escalation password prompt:\n"
+ + to_text(become_output)
+ )

- for key, event in events:
- if key.fileobj == p.stdout:
- chunk = p.stdout.read()
- elif key.fileobj == p.stderr:
- chunk = p.stderr.read()
+ chunks = b""
+ for key, dummy_event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ if chunk:
+ chunks += chunk
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+ if chunk:
+ chunks += chunk

- if not chunk:
- stdout, stderr = p.communicate()
- raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
- become_output += chunk
- finally:
- selector.close()
+ if not chunks:
+ stdout, stderr = p.communicate()
+ raise AnsibleError(
+ "privilege output closed while waiting for password prompt:\n"
+ + to_text(become_output)
+ )
+ become_output += chunks
+ finally:
+ selector.close()

- if not self.become.check_success(become_output):
- become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
- p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option(
+ "become_pass", playcontext=self._play_context
+ )
+ p.stdin.write(
+ to_bytes(become_pass, errors="surrogate_or_strict") + b"\n"
+ )
+ fcntl.fcntl(
+ p.stdout,
+ fcntl.F_SETFL,
+ fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK,
+ )
+ fcntl.fcntl(
+ p.stderr,
+ fcntl.F_SETFL,
+ fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK,
+ )

- display.debug("getting output with communicate()")
- stdout, stderr = p.communicate(in_data)
- display.debug("done communicating")
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")

- display.debug("done with docker.exec_command()")
- return (p.returncode, stdout, stderr)
+ display.debug("done with docker.exec_command()")
+ return (p.returncode, stdout, stderr)

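The become handling above hinges on switching the child's pipes to non-blocking mode and waiting on them with a selector. A minimal sketch of that pattern in isolation (helper names are mine, not the plugin's):

    import fcntl
    import os
    import selectors

    def set_nonblocking(fileobj) -> None:
        # Switch a pipe to non-blocking so read() returns whatever is
        # available instead of stalling the prompt-detection loop.
        flags = fcntl.fcntl(fileobj, fcntl.F_GETFL)
        fcntl.fcntl(fileobj, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    def wait_readable(fileobjs, timeout):
        # Return the objects that became readable within `timeout` seconds.
        sel = selectors.DefaultSelector()
        try:
            for f in fileobjs:
                sel.register(f, selectors.EVENT_READ)
            return [key.fileobj for key, _ in sel.select(timeout)]
        finally:
            sel.close()
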
- def _prefix_login_path(self, remote_path):
- ''' Make sure that we put files into a standard path
+ def _prefix_login_path(self, remote_path: str) -> str:
+ """Make sure that we put files into a standard path

- If a path is relative, then we need to choose where to put it.
- ssh chooses $HOME but we aren't guaranteed that a home dir will
- exist in any given chroot. So for now we're choosing "/" instead.
- This also happens to be the former default.
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we are not guaranteed that a home dir will
+ exist in any given chroot. So for now we are choosing "/" instead.
+ This also happens to be the former default.

- Can revisit using $HOME instead if it's a problem
- '''
+ Can revisit using $HOME instead if it is a problem
+ """
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
- return ntpath.normpath(remote_path)
- else:
- if not remote_path.startswith(os.path.sep):
- remote_path = os.path.join(os.path.sep, remote_path)
- return os.path.normpath(remote_path)

- def put_file(self, in_path, out_path):
- """ Transfer a file from local to docker container """
+ return ntpath.normpath(remote_path)
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)

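A quick worked example of the POSIX branch of this normalization, anchoring relative paths at "/" as the docstring describes:

    import os.path

    def prefix_login_path(remote_path: str) -> str:
        # Anchor relative paths at "/" since a home directory may not exist.
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    print(prefix_login_path("tmp/../etc/motd"))  # -> /etc/motd
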
+ def put_file(self, in_path: str, out_path: str) -> None:
+ """Transfer a file from local to docker container"""
self._set_conn_data()
- super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+ super().put_file(in_path, out_path)  # type: ignore[safe-super]
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option("remote_addr"))

out_path = self._prefix_login_path(out_path)
- if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")):
raise AnsibleFileNotFound(
- "file or module does not exist: %s" % to_native(in_path))
+ f"file or module does not exist: {to_text(in_path)}"
+ )

- out_path = shlex_quote(out_path)
- # Older docker doesn't have native support for copying files into
+ out_path = quote(out_path)
+ # Older docker does not have native support for copying files into
# running containers, so we use docker exec to implement this
# Although docker version 1.8 and later provide support, the
# owner and group of the files are always set to root
- with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
if not os.fstat(in_file.fileno()).st_size:
- count = ' count=0'
+ count = " count=0"
else:
- count = ''
- args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
- args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ count = ""
+ args = self._build_exec_cmd(
+ [
+ self._play_context.executable,
+ "-c",
+ f"dd of={out_path} bs={BUFSIZE}{count}",
+ ]
+ )
+ args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
try:
- p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError:
- raise AnsibleError("docker connection requires dd command in the container to put files")
+ # pylint: disable-next=consider-using-with
+ p = subprocess.Popen(
+ args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
+ except OSError as exc:
+ raise AnsibleError(
+ "docker connection requires dd command in the container to put files"
+ ) from exc
stdout, stderr = p.communicate()

if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
- (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+ raise AnsibleError(
+ f"failed to transfer file {to_text(in_path)} to {to_text(out_path)}:\n{to_text(stdout)}\n{to_text(stderr)}"
+ )

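A self-contained sketch of the dd trick this method relies on: stream the local file into dd running inside the container over docker exec -i (function name and default buffer size are mine; the plugin uses its BUFSIZE constant):

    import subprocess

    def dd_copy_into_container(docker_cmd: str, container: str, src: str,
                               dest: str, bufsize: int = 65536) -> None:
        # `-i` keeps stdin open, so the file body can stream through
        # docker exec into dd, which writes it inside the container.
        argv = [docker_cmd, "exec", "-i", container,
                "/bin/sh", "-c", f"dd of={dest} bs={bufsize}"]
        with open(src, "rb") as in_file:
            subprocess.run(argv, stdin=in_file, check=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
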
- def fetch_file(self, in_path, out_path):
- """ Fetch a file from container to local. """
+ def fetch_file(self, in_path: str, out_path: str) -> None:
+ """Fetch a file from container to local."""
self._set_conn_data()
- super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+ super().fetch_file(in_path, out_path)  # type: ignore[safe-super]
+ display.vvv(
+ f"FETCH {in_path} TO {out_path}", host=self.get_option("remote_addr")
+ )

in_path = self._prefix_login_path(in_path)
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)

- args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir]
- args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ args = [
+ self.docker_cmd,
+ "cp",
+ f"{self.get_option('remote_addr')}:{in_path}",
+ out_dir,
+ ]
+ args = [to_bytes(i, errors="surrogate_or_strict") for i in args]

- p = subprocess.Popen(args, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
+ with subprocess.Popen(
+ args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ ) as p:
+ p.communicate()

- if getattr(self._shell, "_IS_WINDOWS", False):
- import ntpath
- actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
- else:
- actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath

- if p.returncode != 0:
- # Older docker doesn't have native support for fetching files command `cp`
- # If `cp` fails, try to use `dd` instead
- args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
- args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
- with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
- try:
- p = subprocess.Popen(args, stdin=subprocess.PIPE,
- stdout=out_file, stderr=subprocess.PIPE)
- except OSError:
- raise AnsibleError("docker connection requires dd command in the container to put files")
- stdout, stderr = p.communicate()
+ actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
+ else:
+ actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

- if p.returncode != 0:
- raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ if p.returncode != 0:
+ # Older docker does not have native support for fetching files command `cp`
+ # If `cp` fails, try to use `dd` instead
+ args = self._build_exec_cmd(
+ [
+ self._play_context.executable,
+ "-c",
+ f"dd if={in_path} bs={BUFSIZE}",
+ ]
+ )
+ args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
+ with open(
+ to_bytes(actual_out_path, errors="surrogate_or_strict"), "wb"
+ ) as out_file:
+ try:
+ # pylint: disable-next=consider-using-with
+ pp = subprocess.Popen(
+ args,
+ stdin=subprocess.PIPE,
+ stdout=out_file,
+ stderr=subprocess.PIPE,
+ )
+ except OSError as exc:
+ raise AnsibleError(
+ "docker connection requires dd command in the container to put files"
+ ) from exc
+ stdout, stderr = pp.communicate()

+ if pp.returncode != 0:
+ raise AnsibleError(
+ f"failed to fetch file {in_path} to {out_path}:\n{stdout!r}\n{stderr!r}"
+ )

# Rename if needed
if actual_out_path != out_path:
- os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+ os.rename(
+ to_bytes(actual_out_path, errors="strict"),
+ to_bytes(out_path, errors="strict"),
+ )

- def close(self):
- """ Terminate the connection. Nothing to do for Docker"""
- super(Connection, self).close()
+ def close(self) -> None:
+ """Terminate the connection. Nothing to do for Docker"""
+ super().close()  # type: ignore[safe-super]
self._connected = False

- def reset(self):
+ def reset(self) -> None:
# Clear container user cache
self._container_user_cache = {}

@@ -1,102 +1,150 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ # SPDX-License-Identifier: GPL-3.0-or-later

- from __future__ import (absolute_import, division, print_function)
- __metaclass__ = type
+ from __future__ import annotations

- DOCUMENTATION = '''

+ DOCUMENTATION = r"""
author:
- - Felix Fontein (@felixfontein)
+ - Felix Fontein (@felixfontein)
name: docker_api
short_description: Run tasks in docker containers
version_added: 1.1.0
description:
- - Run commands or put/fetch files to an existing docker container.
- - Uses Docker SDK for Python to interact directly with the Docker daemon instead of
- using the Docker CLI. Use the
- R(community.docker.docker,ansible_collections.community.docker.docker_connection)
- connection plugin if you want to use the Docker CLI.
- options:
- remote_user:
- type: str
- description:
- - The user to execute as inside the container.
- vars:
- - name: ansible_user
- - name: ansible_docker_user
- ini:
- - section: defaults
- key: remote_user
- env:
- - name: ANSIBLE_REMOTE_USER
- cli:
- - name: user
- keyword:
- - name: remote_user
- remote_addr:
- type: str
- description:
- - The name of the container you want to access.
- default: inventory_hostname
- vars:
- - name: inventory_hostname
- - name: ansible_host
- - name: ansible_docker_host
- container_timeout:
- default: 10
- description:
- - Controls how long we can wait to access reading output from the container once execution started.
- env:
- - name: ANSIBLE_TIMEOUT
- - name: ANSIBLE_DOCKER_TIMEOUT
- version_added: 2.2.0
- ini:
- - key: timeout
- section: defaults
- - key: timeout
- section: docker_connection
- version_added: 2.2.0
- vars:
- - name: ansible_docker_timeout
- version_added: 2.2.0
- cli:
- - name: timeout
- type: integer

+ - Run commands or put/fetch files to an existing docker container.
+ - Uses the L(requests library,https://pypi.org/project/requests/) to interact directly with the Docker daemon instead of
+ using the Docker CLI. Use the P(community.docker.docker#connection) connection plugin if you want to use the Docker CLI.
+ notes:
+ - Does B(not work with TCP TLS sockets)! This is caused by the inability to send C(close_notify) without closing the connection
+ with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
extends_documentation_fragment:
- - community.docker.docker
- - community.docker.docker.var_names
- - community.docker.docker.docker_py_1_documentation
- '''
+ - community.docker._docker.api_documentation
+ - community.docker._docker.var_names
+ options:
+ remote_user:
+ type: str
+ description:
+ - The user to execute as inside the container.
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ cli:
+ - name: user
+ keyword:
+ - name: remote_user
+ remote_addr:
+ type: str
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_docker_host
+ container_timeout:
+ default: 10
+ description:
+ - Controls how long we can wait to access reading output from the container once execution started.
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_DOCKER_TIMEOUT
+ version_added: 2.2.0
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: docker_connection
+ version_added: 2.2.0
+ vars:
+ - name: ansible_docker_timeout
+ version_added: 2.2.0
+ cli:
+ - name: timeout
+ type: integer
+ extra_env:
+ description:
+ - Provide extra environment variables to set when running commands in the Docker container.
+ - This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
+ manager.
+ vars:
+ - name: ansible_docker_extra_env
+ type: dict
+ version_added: 3.12.0
+ working_dir:
+ description:
+ - The directory inside the container to run commands in.
+ - Requires Docker API version 1.35 or later.
+ env:
+ - name: ANSIBLE_DOCKER_WORKING_DIR
+ ini:
+ - key: working_dir
+ section: docker_connection
+ vars:
+ - name: ansible_docker_working_dir
+ type: string
+ version_added: 3.12.0
+ privileged:
+ description:
+ - Whether commands should be run with extended privileges.
+ - B(Note) that this allows command to potentially break out of the container. Use with care!
+ env:
+ - name: ANSIBLE_DOCKER_PRIVILEGED
+ ini:
+ - key: privileged
+ section: docker_connection
+ vars:
+ - name: ansible_docker_privileged
+ type: boolean
+ default: false
+ version_added: 3.12.0
+ """

import io
import os
import os.path
import shutil
import tarfile
+ import typing as t

- from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
- from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+ from ansible.errors import AnsibleConnectionFailure, AnsibleFileNotFound
+ from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display

- from ansible_collections.community.docker.plugins.module_utils.common import (
+ from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+ from ansible_collections.community.docker.plugins.module_utils._common_api import (
RequestException,
)
- from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
- DockerSocketHandler,
+ from ansible_collections.community.docker.plugins.module_utils._copy import (
+ DockerFileCopyError,
+ DockerFileNotFound,
+ fetch_file,
+ put_file,
)
- from ansible_collections.community.docker.plugins.plugin_utils.common import (
+ from ansible_collections.community.docker.plugins.module_utils._version import (
+ LooseVersion,
+ )
+ from ansible_collections.community.docker.plugins.plugin_utils._common_api import (
AnsibleDockerClient,
)
+ from ansible_collections.community.docker.plugins.plugin_utils._socket_handler import (
+ DockerSocketHandler,
+ )


+ if t.TYPE_CHECKING:
+ from collections.abc import Callable

+ _T = t.TypeVar("_T")

- try:
- from docker.errors import DockerException, APIError, NotFound
- except Exception:
- # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
- pass

- MIN_DOCKER_PY = '1.7.0'
MIN_DOCKER_API = None


@@ -104,128 +152,206 @@ display = Display()


class Connection(ConnectionBase):
- ''' Local docker based connections '''
+ """Local docker based connections"""

- transport = 'community.docker.docker_api'
+ transport = "community.docker.docker_api"
has_pipelining = True

- def _call_client(self, callable, not_found_can_be_resource=False):
+ def _call_client(
+ self,
+ f: Callable[[AnsibleDockerClient], _T],
+ not_found_can_be_resource: bool = False,
+ ) -> _T:
+ if self.client is None:
+ raise AssertionError("Client must be present")
+ remote_addr = self.get_option("remote_addr")
try:
- return callable()
+ return f(self.client)
except NotFound as e:
if not_found_can_be_resource:
- raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, self.get_option('remote_addr')))
- else:
- raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, self.get_option('remote_addr')))
+ raise AnsibleConnectionFailure(
+ f'Could not find container "{remote_addr}" or resource in it ({e})'
+ ) from e
+ raise AnsibleConnectionFailure(
+ f'Could not find container "{remote_addr}" ({e})'
+ ) from e
except APIError as e:
- if e.response and e.response.status_code == 409:
- raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, self.get_option('remote_addr')))
+ if e.response is not None and e.response.status_code == 409:
+ raise AnsibleConnectionFailure(
+ f'The container "{remote_addr}" has been paused ({e})'
+ ) from e
self.client.fail(
- 'An unexpected docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
+ f'An unexpected Docker error occurred for container "{remote_addr}": {e}'
)
except DockerException as e:
self.client.fail(
- 'An unexpected docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
+ f'An unexpected Docker error occurred for container "{remote_addr}": {e}'
)
except RequestException as e:
self.client.fail(
- 'An unexpected requests error occurred for container "{1}" when docker-py tried to talk to the docker daemon: {0}'
- .format(e, self.get_option('remote_addr'))
+ f'An unexpected requests error occurred for container "{remote_addr}" when trying to talk to the Docker daemon: {e}'
)

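The wrapper above is an error-translation funnel: every client call goes through one place that maps low-level exceptions onto connection-level failures. A minimal sketch of the same pattern, reusing the exception names from the imports above (the helper name is mine):

    def call_with_translation(f, client, container):
        # Map low-level client errors onto connection-level failures so
        # callers only ever see AnsibleConnectionFailure.
        try:
            return f(client)
        except NotFound as e:
            raise AnsibleConnectionFailure(
                f'Could not find container "{container}" ({e})'
            ) from e
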
- def __init__(self, play_context, new_stdin, *args, **kwargs):
- super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+ def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
+ super().__init__(*args, **kwargs)

- self.client = None
- self.ids = dict()
+ self.client: AnsibleDockerClient | None = None
+ self.ids: dict[str | None, tuple[int, int]] = {}

# Windows uses Powershell modules
if getattr(self._shell, "_IS_WINDOWS", False):
- self.module_implementation_preferences = ('.ps1', '.exe', '')
+ self.module_implementation_preferences = (".ps1", ".exe", "")

- self.actual_user = None
+ self.actual_user: str | None = None

- def _connect(self, port=None):
- """ Connect to the container. Nothing to do """
- super(Connection, self)._connect()
+ def _connect(self) -> Connection:
+ """Connect to the container. Nothing to do"""
+ super()._connect()  # type: ignore[safe-super]
if not self._connected:
- self.actual_user = self.get_option('remote_user')
- display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
- self.actual_user or u'?'), host=self.get_option('remote_addr')
+ self.actual_user = self.get_option("remote_user")
+ display.vvv(
+ f"ESTABLISH DOCKER CONNECTION FOR USER: {self.actual_user or '?'}",
+ host=self.get_option("remote_addr"),
)
if self.client is None:
- self.client = AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API)
+ self.client = AnsibleDockerClient(
+ self, min_docker_api_version=MIN_DOCKER_API
+ )
self._connected = True

if self.actual_user is None and display.verbosity > 2:
- # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Since we are not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
- # This saves overhead from calling into docker when we don't need to
- display.vvv(u"Trying to determine actual user")
- result = self._call_client(lambda: self.client.inspect_container(self.get_option('remote_addr')))
- if result.get('Config'):
- self.actual_user = result['Config'].get('User')
+ # This saves overhead from calling into docker when we do not need to
+ display.vvv("Trying to determine actual user")
+ result = self._call_client(
+ lambda client: client.get_json(
+ "/containers/{0}/json", self.get_option("remote_addr")
+ )
+ )
+ if result.get("Config"):
+ self.actual_user = result["Config"].get("User")
if self.actual_user is not None:
- display.vvv(u"Actual user is '{0}'".format(self.actual_user))
+ display.vvv(f"Actual user is '{self.actual_user}'")

- def exec_command(self, cmd, in_data=None, sudoable=False):
- """ Run a command on the docker host """
+ return self

- super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+ def exec_command(
+ self, cmd: str, in_data: bytes | None = None, sudoable: bool = False
+ ) -> tuple[int, bytes, bytes]:
+ """Run a command on the docker host"""

- command = [self._play_context.executable, '-c', to_text(cmd)]
+ super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

+ if self.client is None:
+ raise AssertionError("Client must be present")

+ command = [self._play_context.executable, "-c", cmd]

do_become = self.become and self.become.expect_prompt() and sudoable

+ stdin_part = (
+ f", with stdin ({len(in_data)} bytes)" if in_data is not None else ""
+ )
+ become_part = ", with become prompt" if do_become else ""
display.vvv(
- u"EXEC {0}{1}{2}".format(
- to_text(command),
- ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '',
- ', with become prompt' if do_become else '',
- ),
- host=self.get_option('remote_addr')
+ f"EXEC {to_text(command)}{stdin_part}{become_part}",
+ host=self.get_option("remote_addr"),
)

- need_stdin = True if (in_data is not None) or do_become else False
+ need_stdin = bool((in_data is not None) or do_become)

- exec_data = self._call_client(lambda: self.client.exec_create(
- self.get_option('remote_addr'),
- command,
- stdout=True,
- stderr=True,
- stdin=need_stdin,
- user=self.get_option('remote_user') or '',
- # workdir=None, - only works for Docker SDK for Python 3.0.0 and later
- ))
- exec_id = exec_data['Id']
+ data = {
+ "Container": self.get_option("remote_addr"),
+ "User": self.get_option("remote_user") or "",
+ "Privileged": self.get_option("privileged"),
+ "Tty": False,
+ "AttachStdin": need_stdin,
+ "AttachStdout": True,
+ "AttachStderr": True,
+ "Cmd": command,
+ }

+ if "detachKeys" in self.client._general_configs:
+ data["detachKeys"] = self.client._general_configs["detachKeys"]

+ if self.get_option("extra_env"):
+ data["Env"] = []
+ for k, v in self.get_option("extra_env").items():
+ for val, what in ((k, "Key"), (v, "Value")):
+ if not isinstance(val, str):
+ raise AnsibleConnectionFailure(
+ f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted when directly specified "
+ "in YAML, or explicitly converted to strings when the option is templated. "
+ f"{what}: {val!r}"
+ )
+ data["Env"].append(f"{k}={v}")

+ if self.get_option("working_dir") is not None:
+ data["WorkingDir"] = self.get_option("working_dir")
+ if self.client.docker_api_version < LooseVersion("1.35"):
+ raise AnsibleConnectionFailure(
+ "Providing the working directory requires Docker API version 1.35 or newer."
+ f" The Docker daemon the connection is using has API version {self.client.docker_api_version_str}."
+ )

+ exec_data = self._call_client(
+ lambda client: client.post_json_to_json(
+ "/containers/{0}/exec", self.get_option("remote_addr"), data=data
+ )
+ )
+ exec_id = exec_data["Id"]

+ data = {"Tty": False, "Detach": False}
if need_stdin:
- exec_socket = self._call_client(lambda: self.client.exec_start(
- exec_id,
- detach=False,
- socket=True,
- ))
+ exec_socket = self._call_client(
+ lambda client: client.post_json_to_stream_socket(
+ "/exec/{0}/start", exec_id, data=data
+ )
+ )
try:
- with DockerSocketHandler(display, exec_socket, container=self.get_option('remote_addr')) as exec_socket_handler:
+ with DockerSocketHandler(
+ display, exec_socket, container=self.get_option("remote_addr")
+ ) as exec_socket_handler:
if do_become:
- become_output = [b'']
+ assert self.become is not None

- def append_become_output(stream_id, data):
+ become_output = [b""]

+ def append_become_output(stream_id: int, data: bytes) -> None:
become_output[0] += data

- exec_socket_handler.set_block_done_callback(append_become_output)
+ exec_socket_handler.set_block_done_callback(
+ append_become_output
+ )

- while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]):
- if not exec_socket_handler.select(self.get_option('container_timeout')):
+ while not self.become.check_success(
+ become_output[0]
+ ) and not self.become.check_password_prompt(become_output[0]):
+ if not exec_socket_handler.select(
+ self.get_option("container_timeout")
+ ):
stdout, stderr = exec_socket_handler.consume()
- raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0]))
+ raise AnsibleConnectionFailure(
+ "timeout waiting for privilege escalation password prompt:\n"
+ + to_text(become_output[0])
+ )

if exec_socket_handler.is_eof():
- raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0]))
+ raise AnsibleConnectionFailure(
+ "privilege output closed while waiting for password prompt:\n"
+ + to_text(become_output[0])
+ )

if not self.become.check_success(become_output[0]):
- become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
- exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ become_pass = self.become.get_option(
+ "become_pass", playcontext=self._play_context
+ )
+ exec_socket_handler.write(
+ to_bytes(become_pass, errors="surrogate_or_strict")
+ + b"\n"
+ )

if in_data is not None:
exec_socket_handler.write(in_data)
@@ -234,155 +360,122 @@ class Connection(ConnectionBase):
finally:
exec_socket.close()
else:
- stdout, stderr = self._call_client(lambda: self.client.exec_start(
- exec_id,
- detach=False,
- stream=False,
- socket=False,
- demux=True,
- ))
+ stdout, stderr = self._call_client(
+ lambda client: client.post_json_to_stream(
+ "/exec/{0}/start",
+ exec_id,
+ stream=False,
+ demux=True,
+ tty=False,
+ data=data,
+ )
+ )

- result = self._call_client(lambda: self.client.exec_inspect(exec_id))
+ result = self._call_client(
+ lambda client: client.get_json("/exec/{0}/json", exec_id)
+ )

- return result.get('ExitCode') or 0, stdout or b'', stderr or b''
+ return result.get("ExitCode") or 0, stdout or b"", stderr or b""

- def _prefix_login_path(self, remote_path):
- ''' Make sure that we put files into a standard path
+ def _prefix_login_path(self, remote_path: str) -> str:
+ """Make sure that we put files into a standard path

- If a path is relative, then we need to choose where to put it.
- ssh chooses $HOME but we aren't guaranteed that a home dir will
- exist in any given chroot. So for now we're choosing "/" instead.
- This also happens to be the former default.
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we are not guaranteed that a home dir will
+ exist in any given chroot. So for now we are choosing "/" instead.
+ This also happens to be the former default.

- Can revisit using $HOME instead if it's a problem
- '''
+ Can revisit using $HOME instead if it is a problem
+ """
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
- return ntpath.normpath(remote_path)
- else:
- if not remote_path.startswith(os.path.sep):
- remote_path = os.path.join(os.path.sep, remote_path)
- return os.path.normpath(remote_path)

- def put_file(self, in_path, out_path):
- """ Transfer a file from local to docker container """
- super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+ return ntpath.normpath(remote_path)
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)

+ def put_file(self, in_path: str, out_path: str) -> None:
+ """Transfer a file from local to docker container"""
+ super().put_file(in_path, out_path)  # type: ignore[safe-super]
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option("remote_addr"))

+ if self.client is None:
+ raise AssertionError("Client must be present")

out_path = self._prefix_login_path(out_path)
- if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound(
- "file or module does not exist: %s" % to_native(in_path))

if self.actual_user not in self.ids:
- dummy, ids, dummy = self.exec_command(b'id -u && id -g')
+ dummy, ids, dummy2 = self.exec_command("id -u && id -g")
+ remote_addr = self.get_option("remote_addr")
try:
- user_id, group_id = ids.splitlines()
- self.ids[self.actual_user] = int(user_id), int(group_id)
+ b_user_id, b_group_id = ids.splitlines()
+ user_id, group_id = int(b_user_id), int(b_group_id)
+ self.ids[self.actual_user] = user_id, group_id
display.vvvv(
- 'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user),
- host=self.get_option('remote_addr')
+ f'PUT: Determined uid={user_id} and gid={group_id} for user "{self.actual_user}"',
+ host=remote_addr,
)
except Exception as e:
raise AnsibleConnectionFailure(
- 'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}'
- .format(e, self.get_option('remote_addr'), ids)
- )
+ f'Error while determining user and group ID of current user in container "{remote_addr}": {e}\nGot value: {ids!r}'
+ ) from e

- b_in_path = to_bytes(in_path, errors='surrogate_or_strict')

- out_dir, out_file = os.path.split(out_path)

- # TODO: stream tar file, instead of creating it in-memory into a BytesIO

- bio = io.BytesIO()
- with tarfile.open(fileobj=bio, mode='w|', dereference=True, encoding='utf-8') as tar:
- # Note that without both name (bytes) and arcname (unicode), this either fails for
- # Python 2.6/2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
- # form) it works with Python 2.6, 2.7, 3.5, 3.6, and 3.7 up to 3.9.
- tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
- user_id, group_id = self.ids[self.actual_user]
- tarinfo.uid = user_id
- tarinfo.uname = ''
- if self.actual_user:
- tarinfo.uname = self.actual_user
- tarinfo.gid = group_id
- tarinfo.gname = ''
- tarinfo.mode &= 0o700
- with open(b_in_path, 'rb') as f:
- tar.addfile(tarinfo, fileobj=f)
- data = bio.getvalue()

- ok = self._call_client(lambda: self.client.put_archive(
- self.get_option('remote_addr'),
- out_dir,
- data,  # can also be file object for streaming; this is only clear from the
- # implementation of put_archive(), which uses requests's put().
- # See https://2.python-requests.org/en/master/user/advanced/#streaming-uploads
- # WARNING: might not work with all transports!
- ), not_found_can_be_resource=True)
- if not ok:
- raise AnsibleConnectionFailure(
- 'Unknown error while creating file "{0}" in container "{1}".'
- .format(out_path, self.get_option('remote_addr'))
+ user_id, group_id = self.ids[self.actual_user]
+ try:
+ self._call_client(
+ lambda client: put_file(
+ client,
+ container=self.get_option("remote_addr"),
+ in_path=in_path,
+ out_path=out_path,
+ user_id=user_id,
+ group_id=group_id,
+ user_name=self.actual_user,
+ follow_links=True,
+ ),
+ not_found_can_be_resource=True,
)
+ except DockerFileNotFound as exc:
+ raise AnsibleFileNotFound(to_text(exc)) from exc
+ except DockerFileCopyError as exc:
+ raise AnsibleConnectionFailure(to_text(exc)) from exc

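The removed code above shows the core idea that the put_file helper now encapsulates: build a single-entry tar archive in memory and upload it through the archive endpoint, forcing ownership so the file belongs to the exec user. A minimal sketch of just the archive-building step (function name is mine):

    import io
    import tarfile

    def tar_single_file(path: str, arcname: str, uid: int, gid: int) -> bytes:
        # One-entry tar archive in memory, with ownership overridden so the
        # file lands in the container owned by the desired uid/gid.
        bio = io.BytesIO()
        with tarfile.open(fileobj=bio, mode="w|", dereference=True) as tar:
            info = tar.gettarinfo(path, arcname=arcname)
            info.uid, info.gid = uid, gid
            info.uname = info.gname = ""
            info.mode &= 0o700
            with open(path, "rb") as f:
                tar.addfile(info, fileobj=f)
        return bio.getvalue()
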
- def fetch_file(self, in_path, out_path):
- """ Fetch a file from container to local. """
- super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+ def fetch_file(self, in_path: str, out_path: str) -> None:
+ """Fetch a file from container to local."""
+ super().fetch_file(in_path, out_path)  # type: ignore[safe-super]
+ display.vvv(
+ f"FETCH {in_path} TO {out_path}", host=self.get_option("remote_addr")
+ )

+ if self.client is None:
+ raise AssertionError("Client must be present")

in_path = self._prefix_login_path(in_path)
- b_out_path = to_bytes(out_path, errors='surrogate_or_strict')

- considered_in_paths = set()
+ try:
+ self._call_client(
+ lambda client: fetch_file(
+ client,
+ container=self.get_option("remote_addr"),
+ in_path=in_path,
+ out_path=out_path,
+ follow_links=True,
+ log=lambda msg: display.vvvv(
+ msg, host=self.get_option("remote_addr")
+ ),
+ ),
+ not_found_can_be_resource=True,
+ )
+ except DockerFileNotFound as exc:
+ raise AnsibleFileNotFound(to_text(exc)) from exc
+ except DockerFileCopyError as exc:
+ raise AnsibleConnectionFailure(to_text(exc)) from exc

- while True:
- if in_path in considered_in_paths:
- raise AnsibleConnectionFailure('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
- considered_in_paths.add(in_path)

- display.vvvv('FETCH: Fetching "%s"' % in_path, host=self.get_option('remote_addr'))
- stream, stats = self._call_client(lambda: self.client.get_archive(
- self.get_option('remote_addr'),
- in_path,
- ), not_found_can_be_resource=True)

- # TODO: stream tar file instead of downloading it into a BytesIO

- bio = io.BytesIO()
- for chunk in stream:
- bio.write(chunk)
- bio.seek(0)

- with tarfile.open(fileobj=bio, mode='r|') as tar:
- symlink_member = None
- first = True
- for member in tar:
- if not first:
- raise AnsibleConnectionFailure('Received tarfile contains more than one file!')
- first = False
- if member.issym():
- symlink_member = member
- continue
- if not member.isfile():
- raise AnsibleConnectionFailure('Remote file "%s" is not a regular file or a symbolic link' % in_path)
- in_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
- with open(b_out_path, 'wb') as out_f:
- shutil.copyfileobj(in_f, out_f, member.size)
- if first:
- raise AnsibleConnectionFailure('Received tarfile is empty!')
- # If the only member was a file, it's already extracted. If it is a symlink, process it now.
- if symlink_member is not None:
- in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
- display.vvvv('FETCH: Following symbolic link to "%s"' % in_path, host=self.get_option('remote_addr'))
- continue
- return

- def close(self):
- """ Terminate the connection. Nothing to do for Docker"""
- super(Connection, self).close()
+ def close(self) -> None:
+ """Terminate the connection. Nothing to do for Docker"""
+ super().close()  # type: ignore[safe-super]
self._connected = False

- def reset(self):
+ def reset(self) -> None:
self.ids.clear()

@@ -1,77 +1,76 @@
-# (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
+# Copyright (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
 # Based on Ansible local connection plugin by:
-# (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

-DOCUMENTATION = '''
-    name: nsenter
-    short_description: execute on host running controller container
-    version_added: 1.9.0
-    description:
-        - This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container
-          host instead of in the container itself.
-        - This is useful for running Ansible in a pull model, while still keeping the Ansible control node
-          containerized.
-        - It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the
-          namespaces of the provided PID (default PID 1, or init/systemd).
-    author: Jeff Goldschrafe (@jgoldschrafe)
-    options:
-      nsenter_pid:
-        description:
-            - PID to attach with using nsenter.
-            - The default should be fine unless you are attaching as a non-root user.
-        type: int
-        default: 1
-        vars:
-            - name: ansible_nsenter_pid
-        env:
-            - name: ANSIBLE_NSENTER_PID
-        ini:
-            - section: nsenter_connection
-              key: nsenter_pid
-    notes:
-        - The remote user is ignored; this plugin always runs as root.
-        - >-
-          This plugin requires the Ansible controller container to be launched in the following way:
-          (1) The container image contains the C(nsenter) program;
-          (2) The container is launched in privileged mode;
-          (3) The container is launched in the host's PID namespace (C(--pid host)).
-'''
+DOCUMENTATION = r"""
+name: nsenter
+short_description: execute on host running controller container
+version_added: 1.9.0
+description:
+  - This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container host instead
+    of in the container itself.
+  - This is useful for running Ansible in a pull model, while still keeping the Ansible control node containerized.
+  - It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the namespaces
+    of the provided PID (default PID 1, or init/systemd).
+author: Jeff Goldschrafe (@jgoldschrafe)
+options:
+  nsenter_pid:
+    description:
+      - PID to attach with using nsenter.
+      - The default should be fine unless you are attaching as a non-root user.
+    type: int
+    default: 1
+    vars:
+      - name: ansible_nsenter_pid
+    env:
+      - name: ANSIBLE_NSENTER_PID
+    ini:
+      - section: nsenter_connection
+        key: nsenter_pid
+notes:
+  - The remote user is ignored; this plugin always runs as root.
+  - "This plugin requires the Ansible controller container to be launched in the following way: (1) The container image contains
+    the C(nsenter) program; (2) The container is launched in privileged mode; (3) The container is launched in the host's
+    PID namespace (C(--pid host))."
+"""

+import fcntl
 import os
 import pty
-import shutil
+import selectors
+import shlex
 import subprocess
-import fcntl
+import typing as t

 import ansible.constants as C
-from ansible.errors import AnsibleError, AnsibleFileNotFound
-from ansible.module_utils.compat import selectors
-from ansible.module_utils.six import binary_type, text_type
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_bytes, to_text
 from ansible.plugins.connection import ConnectionBase
 from ansible.utils.display import Display
 from ansible.utils.path import unfrackpath


 display = Display()


 class Connection(ConnectionBase):
-    '''Connections to a container host using nsenter
-    '''
+    """Connections to a container host using nsenter"""

-    transport = 'community.docker.nsenter'
+    transport = "community.docker.nsenter"
     has_pipelining = False

-    def __init__(self, *args, **kwargs):
-        super(Connection, self).__init__(*args, **kwargs)
+    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
+        super().__init__(*args, **kwargs)
         self.cwd = None
         self._nsenter_pid = None

-    def _connect(self):
+    def _connect(self) -> t.Self:
         self._nsenter_pid = self.get_option("nsenter_pid")

         # Because nsenter requires very high privileges, our remote user
@@ -80,24 +79,28 @@ class Connection(ConnectionBase):

         if not self._connected:
             display.vvv(
-                u"ESTABLISH NSENTER CONNECTION FOR USER: {0}".format(
-                    self._play_context.remote_user
-                ),
+                f"ESTABLISH NSENTER CONNECTION FOR USER: {self._play_context.remote_user}",
                 host=self._play_context.remote_addr,
             )
         self._connected = True
         return self

-    def exec_command(self, cmd, in_data=None, sudoable=True):
-        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+    def exec_command(
+        self, cmd: str, in_data: bytes | None = None, sudoable: bool = True
+    ) -> tuple[int, bytes, bytes]:
+        super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

         display.debug("in nsenter.exec_command()")

-        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
+        # pylint: disable-next=no-member
+        def_executable: str | None = C.DEFAULT_EXECUTABLE  # type: ignore[attr-defined]
+        executable = def_executable.split()[0] if def_executable else None

-        if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
-            raise AnsibleError("failed to find the executable specified %s."
-                               " Please verify if the executable exists and re-try." % executable)
+        if not os.path.exists(to_bytes(executable, errors="surrogate_or_strict")):
+            raise AnsibleError(
+                f"failed to find the executable specified {executable}."
+                " Please verify if the executable exists and re-try."
+            )

         # Rewrite the provided command to prefix it with nsenter
         nsenter_cmd_parts = [
@@ -108,18 +111,14 @@
             "--pid",
             "--uts",
             "--preserve-credentials",
-            "--target={0}".format(self._nsenter_pid),
+            f"--target={self._nsenter_pid}",
             "--",
         ]

-        if isinstance(cmd, (text_type, binary_type)):
-            cmd_parts = nsenter_cmd_parts + [cmd]
-            cmd = to_bytes(" ".join(cmd_parts))
-        else:
-            cmd_parts = nsenter_cmd_parts + cmd
-            cmd = [to_bytes(arg) for arg in cmd_parts]
+        cmd_parts = nsenter_cmd_parts + [cmd]
+        cmd_b = to_bytes(" ".join(cmd_parts))

-        display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
+        display.vvv(f"EXEC {to_text(cmd_b)}", host=self._play_context.remote_addr)
         display.debug("opening command with Popen()")

         master = None
@@ -128,112 +127,162 @@
         # This plugin does not support pipelining. This diverges from the behavior of
         # the core "local" connection plugin that this one derives from.
         if sudoable and self.become and self.become.expect_prompt():
-            # Create a pty if sudoable for privlege escalation that needs it.
+            # Create a pty if sudoable for privilege escalation that needs it.
             # Falls back to using a standard pipe if this fails, which may
             # cause the command to fail in certain situations where we are escalating
             # privileges or the command otherwise needs a pty.
             try:
                 master, stdin = pty.openpty()
             except (IOError, OSError) as e:
-                display.debug("Unable to open pty: %s" % to_native(e))
+                display.debug(f"Unable to open pty: {e}")

-        p = subprocess.Popen(
-            cmd,
-            shell=isinstance(cmd, (text_type, binary_type)),
-            executable=executable if isinstance(cmd, (text_type, binary_type)) else None,
+        with subprocess.Popen(
+            cmd_b,
+            shell=True,
+            executable=executable,
             cwd=self.cwd,
             stdin=stdin,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
-        )
+        ) as p:
+            assert p.stderr is not None
+            assert p.stdin is not None
+            assert p.stdout is not None

-        # if we created a master, we can close the other half of the pty now, otherwise master is stdin
-        if master is not None:
-            os.close(stdin)
+            # if we created a master, we can close the other half of the pty now, otherwise master is stdin
+            if master is not None:
+                os.close(stdin)

-        display.debug("done running command with Popen()")
+            display.debug("done running command with Popen()")

-        if self.become and self.become.expect_prompt() and sudoable:
-            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
-            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
-            selector = selectors.DefaultSelector()
-            selector.register(p.stdout, selectors.EVENT_READ)
-            selector.register(p.stderr, selectors.EVENT_READ)
+            if self.become and self.become.expect_prompt() and sudoable:
+                fcntl.fcntl(
+                    p.stdout,
+                    fcntl.F_SETFL,
+                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK,
+                )
+                fcntl.fcntl(
+                    p.stderr,
+                    fcntl.F_SETFL,
+                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK,
+                )
+                selector = selectors.DefaultSelector()
+                selector.register(p.stdout, selectors.EVENT_READ)
+                selector.register(p.stderr, selectors.EVENT_READ)

-            become_output = b''
-            try:
-                while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
-                    events = selector.select(self._play_context.timeout)
-                    if not events:
-                        stdout, stderr = p.communicate()
-                        raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+                become_output = b""
+                try:
+                    while not self.become.check_success(
+                        become_output
+                    ) and not self.become.check_password_prompt(become_output):
+                        events = selector.select(self._play_context.timeout)
+                        if not events:
+                            stdout, stderr = p.communicate()
+                            raise AnsibleError(
+                                "timeout waiting for privilege escalation password prompt:\n"
+                                + to_text(become_output)
+                            )

-                    for key, event in events:
-                        if key.fileobj == p.stdout:
-                            chunk = p.stdout.read()
-                        elif key.fileobj == p.stderr:
-                            chunk = p.stderr.read()
+                        chunks = b""
+                        for key, dummy_event in events:
+                            if key.fileobj == p.stdout:
+                                chunk = p.stdout.read()
+                                if chunk:
+                                    chunks += chunk
+                            elif key.fileobj == p.stderr:
+                                chunk = p.stderr.read()
+                                if chunk:
+                                    chunks += chunk

-                    if not chunk:
-                        stdout, stderr = p.communicate()
-                        raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
-                    become_output += chunk
-            finally:
-                selector.close()
+                        if not chunks:
+                            stdout, stderr = p.communicate()
+                            raise AnsibleError(
+                                "privilege output closed while waiting for password prompt:\n"
+                                + to_text(become_output)
+                            )
+                        become_output += chunks
+                finally:
+                    selector.close()

-            if not self.become.check_success(become_output):
-                become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
-                if master is None:
-                    p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
-                else:
-                    os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+                if not self.become.check_success(become_output):
+                    become_pass = self.become.get_option(
+                        "become_pass", playcontext=self._play_context
+                    )
+                    if master is None:
+                        p.stdin.write(
+                            to_bytes(become_pass, errors="surrogate_or_strict") + b"\n"
+                        )
+                    else:
+                        os.write(
+                            master,
+                            to_bytes(become_pass, errors="surrogate_or_strict") + b"\n",
+                        )

-            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
-            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+                fcntl.fcntl(
+                    p.stdout,
+                    fcntl.F_SETFL,
+                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK,
+                )
+                fcntl.fcntl(
+                    p.stderr,
+                    fcntl.F_SETFL,
+                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK,
+                )

-        display.debug("getting output with communicate()")
-        stdout, stderr = p.communicate(in_data)
-        display.debug("done communicating")
+            display.debug("getting output with communicate()")
+            stdout, stderr = p.communicate(in_data)
+            display.debug("done communicating")

-        # finally, close the other half of the pty, if it was created
-        if master:
-            os.close(master)
+            # finally, close the other half of the pty, if it was created
+            if master:
+                os.close(master)

-        display.debug("done with nsenter.exec_command()")
-        return (p.returncode, stdout, stderr)
+            display.debug("done with nsenter.exec_command()")
+            return (p.returncode, stdout, stderr)

-    def put_file(self, in_path, out_path):
-        super(Connection, self).put_file(in_path, out_path)
+    def put_file(self, in_path: str, out_path: str) -> None:
+        super().put_file(in_path, out_path)  # type: ignore[safe-super]

         in_path = unfrackpath(in_path, basedir=self.cwd)
         out_path = unfrackpath(out_path, basedir=self.cwd)

-        display.vvv(u"PUT {0} to {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+        display.vvv(f"PUT {in_path} to {out_path}", host=self._play_context.remote_addr)
         try:
             with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
                 in_data = in_file.read()
-            rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data)
+            rc, dummy_out, err = self.exec_command(
+                cmd=f"tee {shlex.quote(out_path)}", in_data=in_data
+            )
             if rc != 0:
-                raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, err))
+                raise AnsibleError(
+                    f"failed to transfer file to {out_path}: {to_text(err)}"
+                )
         except IOError as e:
-            raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, to_native(e)))
+            raise AnsibleError(f"failed to transfer file to {out_path}: {e}") from e

-    def fetch_file(self, in_path, out_path):
-        super(Connection, self).fetch_file(in_path, out_path)
+    def fetch_file(self, in_path: str, out_path: str) -> None:
+        super().fetch_file(in_path, out_path)  # type: ignore[safe-super]

         in_path = unfrackpath(in_path, basedir=self.cwd)
         out_path = unfrackpath(out_path, basedir=self.cwd)

         try:
-            rc, out, err = self.exec_command(cmd=["cat", in_path])
-            display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+            rc, out, err = self.exec_command(cmd=f"cat {shlex.quote(in_path)}")
+            display.vvv(
+                f"FETCH {in_path} TO {out_path}", host=self._play_context.remote_addr
+            )
             if rc != 0:
-                raise AnsibleError("failed to transfer file to {0}: {1}".format(in_path, err))
-            with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+                raise AnsibleError(
+                    f"failed to transfer file to {in_path}: {to_text(err)}"
+                )
+            with open(
+                to_bytes(out_path, errors="surrogate_or_strict"), "wb"
+            ) as out_file:
                 out_file.write(out)
         except IOError as e:
-            raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
+            raise AnsibleError(
+                f"failed to transfer file to {to_text(out_path)}: {e}"
+            ) from e

-    def close(self):
-        ''' terminate the connection; nothing to do here '''
+    def close(self) -> None:
+        """terminate the connection; nothing to do here"""
         self._connected = False
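For illustration, a minimal, hypothetical inventory entry wiring up the reworked plugin (the host name is a placeholder; the PID shown is the documented default):

all:
  hosts:
    container-host:
      ansible_connection: community.docker.nsenter
      # PID whose namespaces nsenter joins; 1 (init/systemd) is the default.
      ansible_nsenter_pid: 1

As the notes above state, this only works when the controller container itself runs privileged, in the host's PID namespace (C(--pid host)), with C(nsenter) available in the image.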
plugins/doc_fragments/_attributes.py (new file, 110 additions)
@@ -0,0 +1,110 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Standard documentation fragment
    DOCUMENTATION = r"""
options: {}
attributes:
  check_mode:
    description: Can run in C(check_mode) and return changed status prediction without modifying target.
  diff_mode:
    description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
  idempotent:
    description:
      - When run twice in a row outside check mode, with the same arguments, the second invocation indicates no change.
      - This assumes that the system controlled/queried by the module has not changed in a relevant way.
"""

    # Should be used together with the standard fragment
    IDEMPOTENT_NOT_MODIFY_STATE = r"""
options: {}
attributes:
  idempotent:
    support: full
    details:
      - This action does not modify state.
"""

    # Should be used together with the standard fragment
    INFO_MODULE = r"""
options: {}
attributes:
  check_mode:
    support: full
    details:
      - This action does not modify state.
  diff_mode:
    support: N/A
    details:
      - This action does not modify state.
"""

    ACTIONGROUP_DOCKER = r"""
options: {}
attributes:
  action_group:
    description: Use C(group/docker) or C(group/community.docker.docker) in C(module_defaults) to set defaults for this module.
    support: full
    membership:
      - community.docker.docker
      - docker
"""

    CONN = r"""
options: {}
attributes:
  become:
    description: Is usable alongside C(become) keywords.
  connection:
    description: Uses the target's configured connection information to execute code on it.
  delegation:
    description: Can be used in conjunction with C(delegate_to) and related keywords.
"""

    FACTS = r"""
options: {}
attributes:
  facts:
    description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
"""

    # Should be used together with the standard fragment and the FACTS fragment
    FACTS_MODULE = r"""
options: {}
attributes:
  check_mode:
    support: full
    details:
      - This action does not modify state.
  diff_mode:
    support: N/A
    details:
      - This action does not modify state.
  facts:
    support: full
"""

    FILES = r"""
options: {}
attributes:
  safe_file_operations:
    description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
"""

    FLOW = r"""
options: {}
attributes:
  action:
    description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
  async:
    description: Supports being used with the C(async) keyword.
"""
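As a usage sketch for the C(action_group) attribute documented above (the C(docker_host) value is a placeholder), C(module_defaults) can set shared options once for every module in the group:

- hosts: localhost
  module_defaults:
    group/community.docker.docker:
      # Shared connection settings inherited by all modules in the action group.
      docker_host: tcp://192.0.2.23:2376
      validate_certs: true
  tasks:
    - name: Inherits docker_host and validate_certs from the group defaults
      community.docker.docker_host_info: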
plugins/doc_fragments/_compose_v2.py (new file, 82 additions)
@@ -0,0 +1,82 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Docker doc fragment
    DOCUMENTATION = r"""
options:
  project_src:
    description:
      - Path to a directory containing a Compose file (C(compose.yml), C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - If O(files) is provided, will look for these files in this directory instead.
      - Mutually exclusive with O(definition). One of O(project_src) and O(definition) must be provided.
    type: path
  project_name:
    description:
      - Provide a project name. If not provided, the project name is taken from the basename of O(project_src).
      - Required when O(definition) is provided.
    type: str
  files:
    description:
      - List of Compose file names relative to O(project_src) to be used instead of the main Compose file (C(compose.yml),
        C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - Files are loaded and merged in the order given.
      - Mutually exclusive with O(definition).
    type: list
    elements: path
    version_added: 3.7.0
  definition:
    description:
      - Compose file describing one or more services, networks and volumes.
      - Mutually exclusive with O(project_src) and O(files). One of O(project_src) and O(definition) must be provided.
      - If provided, PyYAML must be available to this module, and O(project_name) must be specified.
      - Note that a temporary directory will be created and deleted afterwards when using this option.
    type: dict
    version_added: 3.9.0
  env_files:
    description:
      - By default environment files are loaded from a C(.env) file located directly under the O(project_src) directory.
      - O(env_files) can be used to specify the path of one or multiple custom environment files instead.
      - The path is relative to the O(project_src) directory.
    type: list
    elements: path
  profiles:
    description:
      - List of profiles to enable when starting services.
      - Equivalent to C(docker compose --profile).
    type: list
    elements: str
  check_files_existing:
    description:
      - If set to V(false), the module will not check whether one of the files C(compose.yaml), C(compose.yml), C(docker-compose.yaml),
        or C(docker-compose.yml) exists in O(project_src) if O(files) is not provided.
      - This can be useful if environment files with C(COMPOSE_FILE) are used to configure a different filename. The module
        currently does not check for C(COMPOSE_FILE) in environment files or the current environment.
    type: bool
    default: true
    version_added: 3.9.0
requirements:
  - "PyYAML if O(definition) is used"
notes:
  - |-
    The Docker compose CLI plugin has no stable output format (see for example U(https://github.com/docker/compose/issues/10872)),
    and for the main operations also no machine friendly output format. The module tries to accommodate this with various
    version-dependent behavior adjustments and with testing older and newer versions of the Docker compose CLI plugin.
    Currently the module is tested with multiple plugin versions between 2.18.1 and 2.23.3. The exact list of plugin versions
    will change over time. New releases of the Docker compose CLI plugin can break this module at any time.
"""

    # The following needs to be kept in sync with the compose_v2 module utils
    MINIMUM_VERSION = r"""
options: {}
requirements:
  - "Docker CLI with Docker compose plugin 2.18.0 or later"
"""
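A short, hedged sketch of a task using the options this fragment documents (paths and the profile name are placeholders):

- name: Bring up a Compose project with an override file and a profile
  community.docker.docker_compose_v2:
    project_src: /opt/myapp
    files:
      # Loaded and merged in this order, relative to project_src.
      - compose.yml
      - compose.override.yml
    profiles:
      # Equivalent to `docker compose --profile debug`.
      - debug
    state: present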
plugins/doc_fragments/_docker.py (new file, 389 additions)
@@ -0,0 +1,389 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Docker doc fragment
    DOCUMENTATION = r"""
options:
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the TCP connection
        string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, the module will automatically
        replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: str
    default: unix:///var/run/docker.sock
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will be used instead.
        If the environment variable is not set, the default value will be used.
      - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
      - B(Note:) this option is no longer supported for Docker SDK for Python 7.0.0+. Specifying it with Docker SDK for Python
        7.0.0 or newer will lead to an error.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(ca.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
        as an alias and can still be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(cert.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(key.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Note that
        if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used instead. If
        the environment variable is not set, the default value will be used.
    type: bool
    default: false
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
      - Requires Docker SDK for Python 4.4.0 or newer.
    type: bool
    default: false
    version_added: 1.5.0
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  debug:
    description:
      - Debug mode.
    type: bool
    default: false

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. You can define
    E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH), E(DOCKER_TLS), E(DOCKER_TLS_VERIFY)
    and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped with the product that sets up the environment.
    It will set these variables for you. See U(https://docs.docker.com/machine/reference/env/) for more details.
  - When connecting to Docker daemon with TLS, you might need to install additional Python packages. For the Docker SDK for
    Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
  - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
    In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified, and
    use C($DOCKER_CONFIG/config.json) otherwise.
"""
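To make the environment-variable fallback described in the notes concrete, a hedged sketch (host and certificate paths are placeholders):

- hosts: localhost
  # Consulted by the client when the corresponding task options are omitted.
  environment:
    DOCKER_HOST: tcp://192.0.2.23:2376
    DOCKER_TLS_VERIFY: "1"
    DOCKER_CERT_PATH: /etc/docker/certs
  tasks:
    - name: docker_host and the TLS settings come from the environment
      community.docker.docker_host_info: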
    # For plugins: allow to define common options with Ansible variables

    VAR_NAMES = r"""
options:
  docker_host:
    vars:
      - name: ansible_docker_docker_host
  tls_hostname:
    vars:
      - name: ansible_docker_tls_hostname
  api_version:
    vars:
      - name: ansible_docker_api_version
  timeout:
    vars:
      - name: ansible_docker_timeout
  ca_path:
    vars:
      - name: ansible_docker_ca_cert
      - name: ansible_docker_ca_path
        version_added: 3.6.0
  client_cert:
    vars:
      - name: ansible_docker_client_cert
  client_key:
    vars:
      - name: ansible_docker_client_key
  tls:
    vars:
      - name: ansible_docker_tls
  validate_certs:
    vars:
      - name: ansible_docker_validate_certs
"""

    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.

    DOCKER_PY_2_DOCUMENTATION = r"""
options: {}
notes:
  - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon.
requirements:
  - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
    Python module has been superseded by L(docker,https://pypi.org/project/docker/)
    (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
    This module does B(not) work with docker-py."
"""

    # Docker doc fragment when using the vendored API access code
    API_DOCUMENTATION = r"""
options:
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
        TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
        the module will automatically replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: str
    default: unix:///var/run/docker.sock
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
        be used instead. If the environment variable is not set, the default value will be used.
      - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by this collection and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has
        been added as an alias and can still be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
        server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
    type: bool
    default: false
    version_added: 1.5.0
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  debug:
    description:
      - Debug mode
    type: bool
    default: false

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
    You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
    E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
    with the product that sets up the environment. It will set these variables for you. See
    U(https://docs.docker.com/machine/reference/env/) for more details.
  # - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
  #   In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified,
  #   and use C($DOCKER_CONFIG/config.json) otherwise.
  - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon. It uses code derived from the Docker SDK for Python that is included in this
    collection.
requirements:
  - requests
  - pywin32 (when using named pipes on Windows 32)
  - paramiko (when using SSH with O(use_ssh_client=false))
  - pyOpenSSL (when using TLS)
"""

    # Docker doc fragment when using the Docker CLI
    CLI_DOCUMENTATION = r"""
options:
  docker_cli:
    description:
      - Path to the Docker CLI. If not provided, will search for Docker CLI on the E(PATH).
    type: path
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
        TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
        the module will automatically replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
        instead. If the environment variable is not set, the default value will be used.
      - Mutually exclusive with O(cli_context). If neither O(docker_host) nor O(cli_context) are provided, the
        value V(unix:///var/run/docker.sock) is used.
    type: str
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
        be used instead. If the environment variable is not set, the default value will be used.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by this collection and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
        server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  # debug:
  #   description:
  #     - Debug mode
  #   type: bool
  #   default: false
  cli_context:
    description:
      - The Docker CLI context to use.
      - Mutually exclusive with O(docker_host).
    type: str

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
    You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
    E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
    with the product that sets up the environment. It will set these variables for you. See
    U(https://docs.docker.com/machine/reference/env/) for more details.
  - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon. It directly calls the Docker CLI program.
"""
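For the plugin-oriented VAR_NAMES fragment above, a hedged host_vars sketch (host name and values are placeholders):

# host_vars/daemon1.yml
ansible_docker_docker_host: tcp://192.0.2.23:2376
ansible_docker_validate_certs: true
# ansible_docker_ca_path is the newer name (community.docker 3.6.0+);
# ansible_docker_ca_cert remains available as well.
ansible_docker_ca_path: /etc/docker/ca.pem
ansible_docker_timeout: 120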
@@ -1,187 +0,0 @@
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Docker doc fragment
    DOCUMENTATION = r'''

options:
    docker_host:
        description:
            - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
              the module will automatically replace C(tcp) in the connection URL with C(https).
            - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: str
        default: unix://var/run/docker.sock
        aliases: [ docker_url ]
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
              be used instead. If the environment variable is not set, the default value will be used.
            - The current default value is C(localhost). This default is deprecated and will change in community.docker
              2.0.0 to be a value computed from I(docker_host). Explicitly specify C(localhost) to make sure this value
              will still be used, and to disable the deprecation message which will be shown otherwise.
        type: str
    api_version:
        description:
            - The version of the Docker API running on the Docker Host.
            - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: auto
        aliases: [ docker_api_version ]
    timeout:
        description:
            - The maximum amount of time in seconds to wait on a response from the API.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: int
        default: 60
    ca_cert:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_ca_cert, cacert_path ]
    client_cert:
        description:
            - Path to the client's TLS certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_cert, cert_path ]
    client_key:
        description:
            - Path to the client's TLS key file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_key, key_path ]
    ssl_version:
        description:
            - Provide a valid SSL version number. Default value determined by ssl.py module.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
              used instead.
        type: str
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
    use_ssh_client:
        description:
            - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
            - Requires Docker SDK for Python 4.4.0 or newer.
        type: bool
        default: no
        version_added: 1.5.0
    validate_certs:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
        aliases: [ tls_verify ]
    debug:
        description:
            - Debug mode
        type: bool
        default: no

notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
      C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      U(https://docs.docker.com/machine/reference/env/) for more details.
    - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
      For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
    - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
      In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
      and use C($DOCKER_CONFIG/config.json) otherwise.
'''

    # For plugins: allow to define common options with Ansible variables

    VAR_NAMES = r'''
options:
    docker_host:
        vars:
            - name: ansible_docker_docker_host
    tls_hostname:
        vars:
            - name: ansible_docker_tls_hostname
    api_version:
        vars:
            - name: ansible_docker_api_version
    timeout:
        vars:
            - name: ansible_docker_timeout
    ca_cert:
        vars:
            - name: ansible_docker_ca_cert
    client_cert:
        vars:
            - name: ansible_docker_client_cert
    client_key:
        vars:
            - name: ansible_docker_client_key
    ssl_version:
        vars:
            - name: ansible_docker_ssl_version
    tls:
        vars:
            - name: ansible_docker_tls
    validate_certs:
        vars:
            - name: ansible_docker_validate_certs
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0

    DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
notes:
    - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
      communicate with the Docker daemon.
requirements:
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
      For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
      install the C(docker) Python module. Note that both modules should *not*
      be installed at the same time. Also note that when both modules are installed
      and one of them is uninstalled, the other might no longer function and a
      reinstall of it is required."
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
    # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.

    DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
notes:
    - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
      communicate with the Docker daemon.
requirements:
    - "Python >= 2.7"
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
      This module does *not* work with docker-py."
'''
@@ -1,132 +1,138 @@
 # -*- coding: utf-8 -*-
 # Copyright (c) 2020, Felix Fontein <felix@fontein.de>
 # For the parts taken from the docker inventory script:
 # Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
 # Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
 # Copyright (c) 2016, James Tanner <jtanner@redhat.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
+from __future__ import annotations


-DOCUMENTATION = '''
-    name: docker_containers
-    short_description: Ansible dynamic inventory plugin for Docker containers.
-    version_added: 1.1.0
-    author:
-        - Felix Fontein (@felixfontein)
-    requirements:
-        - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
-    extends_documentation_fragment:
-        - ansible.builtin.constructed
-        - community.docker.docker
-        - community.docker.docker.docker_py_1_documentation
-    description:
-        - Reads inventories from the Docker API.
-        - Uses a YAML configuration file that ends with C(docker.[yml|yaml]).
+DOCUMENTATION = r"""
+name: docker_containers
+short_description: Ansible dynamic inventory plugin for Docker containers
+version_added: 1.1.0
+author:
+  - Felix Fontein (@felixfontein)
+extends_documentation_fragment:
+  - ansible.builtin.constructed
+  - community.docker._docker.api_documentation
+  - community.library_inventory_filtering_v1.inventory_filter
+description:
+  - Reads inventories from the Docker API.
+  - Uses a YAML configuration file that ends with V(docker.(yml|yaml\)).
+notes:
+  - The configuration file must be a YAML file whose filename ends with V(docker.yml) or V(docker.yaml). Other filenames will
+    not be accepted.

-    options:
-        plugin:
-            description:
-                - The name of this plugin, it should always be set to C(community.docker.docker_containers)
-                  for this plugin to recognize it as it's own.
-            type: str
-            required: true
-            choices: [ community.docker.docker_containers ]
+options:
+  plugin:
+    description:
+      - The name of this plugin, it should always be set to V(community.docker.docker_containers) for this plugin to recognize
+        it as its own.
+    type: str
+    required: true
+    choices: [community.docker.docker_containers]

-        connection_type:
-            description:
-                - Which connection type to use the containers.
-                - One way to connect to containers is to use SSH (C(ssh)). For this, the options I(default_ip) and
-                  I(private_ssh_port) are used. This requires that a SSH daemon is running inside the containers.
-                - Alternatively, C(docker-cli) selects the
-                  R(docker connection plugin,ansible_collections.community.docker.docker_connection),
-                  and C(docker-api) (default) selects the
-                  R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection).
-                - When C(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin
-                  to the connection plugin. This can be controlled with I(configure_docker_daemon).
-            type: str
-            default: docker-api
-            choices:
-                - ssh
-                - docker-cli
-                - docker-api
+  connection_type:
+    description:
+      - Which connection type to use the containers.
+      - One way to connect to containers is to use SSH (V(ssh)). For this, the options O(default_ip) and O(private_ssh_port)
+        are used. This requires that a SSH daemon is running inside the containers.
+      - Alternatively, V(docker-cli) selects the P(community.docker.docker#connection) connection plugin, and V(docker-api)
+        (default) selects the P(community.docker.docker_api#connection) connection plugin.
+      - When V(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin to the connection
+        plugin. This can be controlled with O(configure_docker_daemon).
+      - Note that the P(community.docker.docker_api#connection) does B(not work with TCP TLS sockets)!
+        See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
+    type: str
+    default: docker-api
+    choices:
+      - ssh
+      - docker-cli
+      - docker-api

-        configure_docker_daemon:
-            description:
-                - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
-                - Only used when I(connection_type=docker-api).
-            type: bool
-            default: true
-            version_added: 1.8.0
+  configure_docker_daemon:
+    description:
+      - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
+      - Only used when O(connection_type=docker-api).
+    type: bool
+    default: true
+    version_added: 1.8.0

-        verbose_output:
-            description:
-                - Toggle to (not) include all available inspection metadata.
-                - Note that all top-level keys will be transformed to the format C(docker_xxx).
-                  For example, C(HostConfig) is converted to C(docker_hostconfig).
-                - If this is C(false), these values can only be used during I(constructed), I(groups), and I(keyed_groups).
-                - The C(docker) inventory script always added these variables, so for compatibility set this to C(true).
-            type: bool
-            default: false
+  verbose_output:
+    description:
+      - Toggle to (not) include all available inspection metadata.
+      - Note that all top-level keys will be transformed to the format C(docker_xxx). For example, C(HostConfig) is converted
+        to C(docker_hostconfig).
+      - If this is V(false), these values can only be used during O(compose), O(groups), and O(keyed_groups).
+      - The C(docker) inventory script always added these variables, so for compatibility set this to V(true).
+    type: bool
+    default: false

-        default_ip:
-            description:
-                - The IP address to assign to ansible_host when the container's SSH port is mapped to interface
-                  '0.0.0.0'.
-                - Only used if I(connection_type) is C(ssh).
-            type: str
-            default: 127.0.0.1
+  default_ip:
+    description:
+      - The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
+      - Only used if O(connection_type) is V(ssh).
+    type: str
+    default: 127.0.0.1

-        private_ssh_port:
-            description:
-                - The port containers use for SSH.
-                - Only used if I(connection_type) is C(ssh).
-            type: int
-            default: 22
+  private_ssh_port:
+    description:
+      - The port containers use for SSH.
+      - Only used if O(connection_type) is V(ssh).
+    type: int
+    default: 22

-        add_legacy_groups:
-            description:
-                - "Add the same groups as the C(docker) inventory script does. These are the following:"
-                - "C(<container id>): contains the container of this ID."
-                - "C(<container name>): contains the container that has this name."
-                - "C(<container short id>): contains the containers that have this short ID (first 13 letters of ID)."
-                - "C(image_<image name>): contains the containers that have the image C(<image name>)."
-                - "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)."
-                - "C(service_<service name>): contains the containers that belong to the service C(<service name>)"
-                - "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host).
-                  Useful if you run this plugin against multiple Docker daemons."
-                - "C(running): contains all containers that are running."
|
||||
- "C(stopped): contains all containers that are not running."
|
||||
- If this is not set to C(true), you should use keyed groups to add the containers to groups.
|
||||
See the examples for how to do that.
|
||||
type: bool
|
||||
default: false
|
||||
'''
+  add_legacy_groups:
+    description:
+      - 'Add the same groups as the C(docker) inventory script does. These are the following:'
+      - 'C(<container id>): contains the container of this ID.'
+      - 'C(<container name>): contains the container that has this name.'
+      - 'C(<container short id>): contains the containers that have this short ID (first 13 letters of ID).'
+      - 'C(image_<image name>): contains the containers that have the image C(<image name>).'
+      - 'C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>).'
+      - 'C(service_<service name>): contains the containers that belong to the service C(<service name>).'
+      - 'C(<docker_host>): contains the containers which belong to the Docker daemon O(docker_host). Useful if you run this
+        plugin against multiple Docker daemons.'
+      - 'C(running): contains all containers that are running.'
+      - 'C(stopped): contains all containers that are not running.'
+      - If this is not set to V(true), you should use keyed groups to add the containers to groups. See the examples for how
+        to do that.
+    type: bool
+    default: false
+  filters:
+    version_added: 3.5.0
+"""

-EXAMPLES = '''
+EXAMPLES = """
 ---
 # Minimal example using local Docker daemon
 plugin: community.docker.docker_containers
-docker_host: unix://var/run/docker.sock
+docker_host: unix:///var/run/docker.sock

 ---
 # Minimal example using remote Docker daemon
 plugin: community.docker.docker_containers
 docker_host: tcp://my-docker-host:2375

 ---
 # Example using remote Docker daemon with unverified TLS
 plugin: community.docker.docker_containers
 docker_host: tcp://my-docker-host:2376
 tls: true

 ---
 # Example using remote Docker daemon with verified TLS and client certificate verification
 plugin: community.docker.docker_containers
 docker_host: tcp://my-docker-host:2376
 validate_certs: true
-ca_cert: /somewhere/ca.pem
+ca_path: /somewhere/ca.pem
 client_key: /somewhere/key.pem
 client_cert: /somewhere/cert.pem

 ---
 # Example using constructed features to create groups
 plugin: community.docker.docker_containers
 docker_host: tcp://my-docker-host:2375
@@ -139,6 +145,7 @@ keyed_groups:
   - prefix: os
     key: docker_platform

 ---
 # Example using SSH connection with an explicit fallback for when port 22 has not been
 # exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
 plugin: community.docker.docker_containers
@@ -146,203 +153,273 @@ connection_type: ssh
 compose:
   ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
   ansible_ssh_port: ansible_ssh_port | default(22, true)
-'''
+
+---
+# Only consider containers which have a label 'foo', or whose name starts with 'a'
+plugin: community.docker.docker_containers
+filters:
+  # Accept all containers which have a label called 'foo'
+  - include: >-
+      "foo" in docker_config.Labels
+  # Next accept all containers whose inventory_hostname starts with 'a'
+  - include: >-
+      inventory_hostname.startswith("a")
+  # Exclude all containers that did not match any of the above filters
+  - exclude: true
+"""

 import re
+import typing as t

 from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
 from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
 from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
     filter_host,
     parse_filters,
 )

-from ansible_collections.community.docker.plugins.module_utils.common import (
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+    APIError,
+    DockerException,
+)
+from ansible_collections.community.docker.plugins.module_utils._common_api import (
     RequestException,
 )
-from ansible_collections.community.docker.plugins.module_utils.util import (
+from ansible_collections.community.docker.plugins.module_utils._util import (
     DOCKER_COMMON_ARGS_VARS,
 )
-from ansible_collections.community.docker.plugins.plugin_utils.common import (
+from ansible_collections.community.docker.plugins.plugin_utils._common_api import (
     AnsibleDockerClient,
 )
 from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
     make_unsafe,
 )


+if t.TYPE_CHECKING:
+    from ansible.inventory.data import InventoryData
+    from ansible.parsing.dataloader import DataLoader

-try:
-    from docker.errors import DockerException, APIError
-except Exception:
-    # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
-    pass
-
-MIN_DOCKER_PY = '1.7.0'
 MIN_DOCKER_API = None


 class InventoryModule(BaseInventoryPlugin, Constructable):
-    ''' Host inventory parser for ansible using Docker daemon as source. '''
+    """Host inventory parser for ansible using Docker daemon as source."""

-    NAME = 'community.docker.docker_containers'
+    NAME = "community.docker.docker_containers"

-    def _slugify(self, value):
-        return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+    def _slugify(self, value: str) -> str:
+        slug = re.sub(r"[^\w-]", "_", value).lower().lstrip("_")
+        return f"docker_{slug}"
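    # Illustrative note, not part of the upstream source: _slugify("HostConfig")
    # returns "docker_hostconfig", which is where the docker_xxx fact names
    # described in DOCUMENTATION come from.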

-    def _populate(self, client):
-        strict = self.get_option('strict')
+    def _populate(self, client: AnsibleDockerClient) -> None:
+        strict = self.get_option("strict")

-        ssh_port = self.get_option('private_ssh_port')
-        default_ip = self.get_option('default_ip')
-        hostname = self.get_option('docker_host')
-        verbose_output = self.get_option('verbose_output')
-        connection_type = self.get_option('connection_type')
-        add_legacy_groups = self.get_option('add_legacy_groups')
+        ssh_port = self.get_option("private_ssh_port")
+        default_ip = self.get_option("default_ip")
+        hostname = self.get_option("docker_host")
+        verbose_output = self.get_option("verbose_output")
+        connection_type = self.get_option("connection_type")
+        add_legacy_groups = self.get_option("add_legacy_groups")
+
+        if self.inventory is None:
+            raise AssertionError("Inventory must be there")

         try:
-            containers = client.containers(all=True)
+            params = {
+                "limit": -1,
+                "all": 1,
+                "size": 0,
+                "trunc_cmd": 0,
+                "since": None,
+                "before": None,
+            }
+            containers = client.get_json("/containers/json", params=params)
         except APIError as exc:
-            raise AnsibleError("Error listing containers: %s" % to_native(exc))
+            raise AnsibleError(f"Error listing containers: {exc}") from exc
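
As a reading aid, the query parameters sent to GET /containers/json above appear to mean roughly the following (a hedged paraphrase of the Docker Engine API, not part of the change itself):

    # all=1      -> also list stopped containers (the old client.containers(all=True))
    # limit=-1   -> do not limit the number of returned containers
    # size=0     -> skip the relatively expensive size calculation per container
    # trunc_cmd, since, before -> legacy parameters, sent with neutral values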

         if add_legacy_groups:
-            self.inventory.add_group('running')
-            self.inventory.add_group('stopped')
+            self.inventory.add_group("running")
+            self.inventory.add_group("stopped")

         extra_facts = {}
-        if self.get_option('configure_docker_daemon'):
+        if self.get_option("configure_docker_daemon"):
             for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
                 value = self.get_option(option_name)
                 if value is not None:
                     extra_facts[var_name] = value

+        filters = parse_filters(self.get_option("filters"))
         for container in containers:
-            id = container.get('Id')
-            short_id = id[:13]
+            container_id = container.get("Id")
+            short_container_id = container_id[:13]

             try:
-                name = container.get('Names', list())[0].lstrip('/')
+                name = container.get("Names", [])[0].lstrip("/")
                 full_name = name
             except IndexError:
-                name = short_id
-                full_name = id
+                name = short_container_id
+                full_name = container_id

-            self.inventory.add_host(name)
-            facts = dict(
-                docker_name=name,
-                docker_short_id=short_id
-            )
-            full_facts = dict()
+            facts = {
+                "docker_name": make_unsafe(name),
+                "docker_short_id": make_unsafe(short_container_id),
+            }
+            full_facts = {}

             try:
-                inspect = client.inspect_container(id)
+                inspect = client.get_json("/containers/{0}/json", container_id)
             except APIError as exc:
-                raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))
+                raise AnsibleError(
+                    f"Error inspecting container {name} - {exc}"
+                ) from exc

-            state = inspect.get('State') or dict()
-            config = inspect.get('Config') or dict()
-            labels = config.get('Labels') or dict()
+            state = inspect.get("State") or {}
+            config = inspect.get("Config") or {}
+            labels = config.get("Labels") or {}

-            running = state.get('Running')
+            running = state.get("Running")

+            groups = []

             # Add container to groups
-            image_name = config.get('Image')
+            image_name = config.get("Image")
             if image_name and add_legacy_groups:
-                self.inventory.add_group('image_{0}'.format(image_name))
-                self.inventory.add_host(name, group='image_{0}'.format(image_name))
+                groups.append(f"image_{image_name}")

-            stack_name = labels.get('com.docker.stack.namespace')
+            stack_name = labels.get("com.docker.stack.namespace")
             if stack_name:
-                full_facts['docker_stack'] = stack_name
+                full_facts["docker_stack"] = stack_name
                 if add_legacy_groups:
-                    self.inventory.add_group('stack_{0}'.format(stack_name))
-                    self.inventory.add_host(name, group='stack_{0}'.format(stack_name))
+                    groups.append(f"stack_{stack_name}")

-            service_name = labels.get('com.docker.swarm.service.name')
+            service_name = labels.get("com.docker.swarm.service.name")
             if service_name:
-                full_facts['docker_service'] = service_name
+                full_facts["docker_service"] = service_name
                 if add_legacy_groups:
-                    self.inventory.add_group('service_{0}'.format(service_name))
-                    self.inventory.add_host(name, group='service_{0}'.format(service_name))
+                    groups.append(f"service_{service_name}")

+            ansible_connection = None
-            if connection_type == 'ssh':
+            if connection_type == "ssh":
                 # Figure out ssh IP and Port
                 try:
                     # Lookup the public facing port Nat'ed to ssh port.
-                    port = client.port(container, ssh_port)[0]
+                    network_settings = inspect.get("NetworkSettings") or {}
+                    port_settings = network_settings.get("Ports") or {}
+                    port = port_settings.get(f"{ssh_port}/tcp")[0]  # type: ignore[index]
                 except (IndexError, AttributeError, TypeError):
-                    port = dict()
+                    port = {}

                 try:
-                    ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
+                    ip = default_ip if port["HostIp"] == "0.0.0.0" else port["HostIp"]
                 except KeyError:
-                    ip = ''
+                    ip = ""

-                facts.update(dict(
-                    ansible_ssh_host=ip,
-                    ansible_ssh_port=port.get('HostPort', 0),
-                ))
-            elif connection_type == 'docker-cli':
-                facts.update(dict(
-                    ansible_host=full_name,
-                    ansible_connection='community.docker.docker',
-                ))
-            elif connection_type == 'docker-api':
-                facts.update(dict(
-                    ansible_host=full_name,
-                    ansible_connection='community.docker.docker_api',
-                ))
+                facts.update(
+                    {
+                        "ansible_ssh_host": ip,
+                        "ansible_ssh_port": port.get("HostPort", 0),
+                    }
+                )
+            elif connection_type == "docker-cli":
+                facts.update(
+                    {
+                        "ansible_host": full_name,
+                    }
+                )
+                ansible_connection = "community.docker.docker"
+            elif connection_type == "docker-api":
+                facts.update(
+                    {
+                        "ansible_host": full_name,
+                    }
+                )
                 facts.update(extra_facts)
+                ansible_connection = "community.docker.docker_api"

+            full_facts.update(facts)
             for key, value in inspect.items():
                 fact_key = self._slugify(key)
                 full_facts[fact_key] = value

+            full_facts = make_unsafe(full_facts)
+
+            if ansible_connection:
+                for d in (facts, full_facts):
+                    if "ansible_connection" not in d:
+                        d["ansible_connection"] = ansible_connection
+
+            if not filter_host(self, name, full_facts, filters):
+                continue
+
             if verbose_output:
                 facts.update(full_facts)

+            self.inventory.add_host(name)
+            for group in groups:
+                self.inventory.add_group(group)
+                self.inventory.add_host(name, group=group)
+
             for key, value in facts.items():
                 self.inventory.set_variable(name, key, value)

             # Use constructed if applicable
             # Composed variables
-            self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
+            self._set_composite_vars(
+                self.get_option("compose"), full_facts, name, strict=strict
+            )
             # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
-            self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
+            self._add_host_to_composed_groups(
+                self.get_option("groups"), full_facts, name, strict=strict
+            )
             # Create groups based on variable values and add the corresponding hosts to it
-            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)
+            self._add_host_to_keyed_groups(
+                self.get_option("keyed_groups"), full_facts, name, strict=strict
+            )

             # We need to do this last since we also add a group called `name`.
             # When we do this before a set_variable() call, the variables are assigned
             # to the group, and not to the host.
             if add_legacy_groups:
-                self.inventory.add_group(id)
-                self.inventory.add_host(name, group=id)
+                self.inventory.add_group(container_id)
+                self.inventory.add_host(name, group=container_id)
                 self.inventory.add_group(name)
                 self.inventory.add_host(name, group=name)
-                self.inventory.add_group(short_id)
-                self.inventory.add_host(name, group=short_id)
+                self.inventory.add_group(short_container_id)
+                self.inventory.add_host(name, group=short_container_id)
                 self.inventory.add_group(hostname)
                 self.inventory.add_host(name, group=hostname)

                 if running is True:
-                    self.inventory.add_host(name, group='running')
+                    self.inventory.add_host(name, group="running")
                 else:
-                    self.inventory.add_host(name, group='stopped')
+                    self.inventory.add_host(name, group="stopped")

-    def verify_file(self, path):
+    def verify_file(self, path: str) -> bool:
         """Return the possibility of a file being consumable by this plugin."""
-        return (
-            super(InventoryModule, self).verify_file(path) and
-            path.endswith(('docker.yaml', 'docker.yml')))
+        return super().verify_file(path) and path.endswith(
+            ("docker.yaml", "docker.yml")
+        )
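    # Illustrative, not in the upstream source: a file named "my.docker.yml" or
    # "docker.yaml" passes this check, while "inventory.yml" is rejected, which
    # matches the filename note in DOCUMENTATION.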

-    def _create_client(self):
-        return AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API)
+    def _create_client(self) -> AnsibleDockerClient:
+        return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)

-    def parse(self, inventory, loader, path, cache=True):
-        super(InventoryModule, self).parse(inventory, loader, path, cache)
+    def parse(
+        self,
+        inventory: InventoryData,
+        loader: DataLoader,
+        path: str,
+        cache: bool = True,
+    ) -> None:
+        super().parse(inventory, loader, path, cache)
         self._read_config_data(path)
         client = self._create_client()
         try:
             self._populate(client)
         except DockerException as e:
-            raise AnsibleError(
-                'An unexpected docker error occurred: {0}'.format(e)
-            )
+            raise AnsibleError(f"An unexpected Docker error occurred: {e}") from e
         except RequestException as e:
             raise AnsibleError(
-                'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e)
-            )
+                f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}"
+            ) from e
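
Since the documentation above warns that the default docker-api connection type does not work with TCP TLS sockets, a TLS setup will typically select another connection type explicitly. A minimal sketch (host name and paths are illustrative):

plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_path: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem
# avoid the docker-api TLS limitation described in the connection_type option
connection_type: docker-cli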

@@ -1,63 +1,70 @@
 # -*- coding: utf-8 -*-
 # Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

-DOCUMENTATION = '''
-name: docker_machine
-author: Ximon Eighteen (@ximon18)
-short_description: Docker Machine inventory source
-requirements:
-  - L(Docker Machine,https://docs.docker.com/machine/)
-extends_documentation_fragment:
-  - constructed
-description:
-  - Get inventory hosts from Docker Machine.
-  - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
-  - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
-  - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
-options:
-  plugin:
-    description: token that ensures this is a source file for the C(docker_machine) plugin.
-    required: yes
-    choices: ['docker_machine', 'community.docker.docker_machine']
-  daemon_env:
-    description:
-      - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
-      - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
-        A warning will be issued for any skipped host if the choice is C(require).
-      - With C(optional) and C(optional-silently), fetch them and not skip hosts for which they cannot be fetched.
-        A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
-      - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
-      - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
-    type: str
-    choices:
-      - require
-      - require-silently
-      - optional
-      - optional-silently
-      - skip
-    default: require
-  running_required:
-    description:
-      - When C(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
-    type: bool
-    default: yes
-  verbose_output:
-    description:
-      - When C(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object
-        named C(docker_machine_node_attributes).
-    type: bool
-    default: yes
-'''
+DOCUMENTATION = r"""
+name: docker_machine
+author: Ximon Eighteen (@ximon18)
+short_description: Docker Machine inventory source
+requirements:
+  - L(Docker Machine,https://docs.docker.com/machine/)
+extends_documentation_fragment:
+  - ansible.builtin.constructed
+  - community.library_inventory_filtering_v1.inventory_filter
+description:
+  - Get inventory hosts from Docker Machine.
+  - Uses a YAML configuration file that ends with V(docker_machine.(yml|yaml\)).
+  - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
+  - The plugin stores the Docker Machine 'env' output variables in C(dm_) prefixed host variables.
+notes:
+  - The configuration file must be a YAML file whose filename ends with V(docker_machine.yml) or V(docker_machine.yaml). Other
+    filenames will not be accepted.
+options:
+  plugin:
+    description: Token that ensures this is a source file for the C(docker_machine) plugin.
+    required: true
+    choices: ['docker_machine', 'community.docker.docker_machine']
+  daemon_env:
+    description:
+      - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
+      - With V(require) and V(require-silently), fetch them and skip any host for which they cannot be fetched. A warning
+        will be issued for any skipped host if the choice is V(require).
+      - With V(optional) and V(optional-silently), fetch them and not skip hosts for which they cannot be fetched. A warning
+        will be issued for hosts where they cannot be fetched if the choice is V(optional).
+      - With V(skip), do not attempt to fetch the docker daemon connection environment variables.
+      - If fetched successfully, the variables will be prefixed with C(dm_) and stored as host variables.
+    type: str
+    choices:
+      - require
+      - require-silently
+      - optional
+      - optional-silently
+      - skip
+    default: require
+  running_required:
+    description:
+      - When V(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
+    type: bool
+    default: true
+  verbose_output:
+    description:
+      - When V(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object named
+        C(docker_machine_node_attributes).
+    type: bool
+    default: true
+  filters:
+    version_added: 3.5.0
+"""

-EXAMPLES = '''
+EXAMPLES = """
 ---
 # Minimal example
 plugin: community.docker.docker_machine

 ---
 # Example using constructed features to create a group per Docker Machine driver
 # (https://docs.docker.com/machine/drivers/), for example:
 #   $ docker-machine create --driver digitalocean ... mymachine
@@ -70,68 +77,95 @@ plugin: community.docker.docker_machine
 #   ]
 #   ...
 # }
-strict: no
+plugin: community.docker.docker_machine
+strict: false
 keyed_groups:
   - separator: ''
     key: docker_machine_node_attributes.DriverName
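# Illustrative note, not part of the upstream example: for the DigitalOcean
# machine created above, the empty prefix and separator should yield a group
# simply named "digitalocean".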

 ---
-# Example grouping hosts by Digital Machine tag
-strict: no
+# Example grouping hosts by Docker Machine tag
+plugin: community.docker.docker_machine
+strict: false
 keyed_groups:
   - prefix: tag
     key: 'dm_tags'

 ---
 # Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
 plugin: community.docker.docker_machine
 compose:
   ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
-'''
-
-from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.common.process import get_bin_path
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.utils.display import Display
+"""

+import json
+import re
+import subprocess
+import typing as t
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_text
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, Constructable
+from ansible.utils.display import Display
+from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
+    filter_host,
+    parse_filters,
+)
+
+from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
+    make_unsafe,
+)
+
+
+if t.TYPE_CHECKING:
+    from ansible.inventory.data import InventoryData
+    from ansible.parsing.dataloader import DataLoader
+
+DaemonEnv = t.Literal[
+    "require", "require-silently", "optional", "optional-silently", "skip"
+]


 display = Display()


 class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-    ''' Host inventory parser for ansible using Docker machine as source. '''
+    """Host inventory parser for ansible using Docker machine as source."""

-    NAME = 'community.docker.docker_machine'
+    NAME = "community.docker.docker_machine"

-    DOCKER_MACHINE_PATH = None
+    docker_machine_path: str | None = None

-    def _run_command(self, args):
-        if not self.DOCKER_MACHINE_PATH:
+    def _run_command(self, args: list[str]) -> str:
+        if not self.docker_machine_path:
             try:
-                self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
+                self.docker_machine_path = get_bin_path("docker-machine")
             except ValueError as e:
-                raise AnsibleError(to_native(e))
+                raise AnsibleError(to_text(e)) from e

-        command = [self.DOCKER_MACHINE_PATH]
+        command = [self.docker_machine_path]
         command.extend(args)
-        display.debug('Executing command {0}'.format(command))
+        display.debug(f"Executing command {command}")
         try:
             result = subprocess.check_output(command)
         except subprocess.CalledProcessError as e:
-            display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
+            display.warning(
+                f"Exception {type(e).__name__} caught while executing command {command}, this was the original exception: {e}"
+            )
             raise e

         return to_text(result).strip()

-    def _get_docker_daemon_variables(self, machine_name):
-        '''
+    def _get_docker_daemon_variables(self, machine_name: str) -> list[tuple[str, str]]:
+        """
         Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
         the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
-        '''
+        """
         try:
-            env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
+            env_lines = self._run_command(
+                ["env", "--shell=sh", machine_name]
+            ).splitlines()
         except subprocess.CalledProcessError:
             # This can happen when the machine is created but provisioning is incomplete
             return []
@@ -146,22 +180,22 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

         # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
         # with the same name and value but with a dm_ name prefix.
-        vars = []
+        env_vars = []
         for line in env_lines:
             match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
             if match:
                 env_var_name = match.group(1)
                 env_var_value = match.group(2)
-                vars.append((env_var_name, env_var_value))
+                env_vars.append((env_var_name, env_var_value))

-        return vars
+        return env_vars

-    def _get_machine_names(self):
-        # Filter out machines that are not in the Running state as we probably can't do anything useful actions
+    def _get_machine_names(self) -> list[str]:
+        # Filter out machines that are not in the Running state as we probably cannot do any useful actions
         # with them.
-        ls_command = ['ls', '-q']
-        if self.get_option('running_required'):
-            ls_command.extend(['--filter', 'state=Running'])
+        ls_command = ["ls", "-q"]
+        if self.get_option("running_required"):
+            ls_command.extend(["--filter", "state=Running"])

         try:
             ls_lines = self._run_command(ls_command)
@@ -170,47 +204,62 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

         return ls_lines.splitlines()

-    def _inspect_docker_machine_host(self, node):
+    def _inspect_docker_machine_host(self, node: str) -> t.Any | None:
         try:
-            inspect_lines = self._run_command(['inspect', self.node])
+            inspect_lines = self._run_command(["inspect", node])
         except subprocess.CalledProcessError:
             return None

         return json.loads(inspect_lines)

-    def _ip_addr_docker_machine_host(self, node):
+    def _ip_addr_docker_machine_host(self, node: str) -> t.Any | None:
         try:
-            ip_addr = self._run_command(['ip', self.node])
+            ip_addr = self._run_command(["ip", node])
         except subprocess.CalledProcessError:
             return None

         return ip_addr

-    def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
+    def _should_skip_host(
+        self,
+        machine_name: str,
+        env_var_tuples: list[tuple[str, str]],
+        daemon_env: DaemonEnv,
+    ) -> bool:
         if not env_var_tuples:
-            warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
-            if daemon_env in ('require', 'require-silently'):
-                if daemon_env == 'require':
-                    display.warning('{0}: host will be skipped'.format(warning_prefix))
+            warning_prefix = f"Unable to fetch Docker daemon env vars from Docker Machine for host {machine_name}"
+            if daemon_env in ("require", "require-silently"):
+                if daemon_env == "require":
+                    display.warning(f"{warning_prefix}: host will be skipped")
                 return True
             else:  # 'optional', 'optional-silently'
-                if daemon_env == 'optional':
-                    display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
+                if daemon_env == "optional":
+                    display.warning(
+                        f"{warning_prefix}: host will lack dm_DOCKER_xxx variables"
+                    )
                 # daemon_env is 'optional-silently'
         return False
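        # Summary of the behaviour above (a reading aid, not in the upstream source):
        #   require           -> host skipped, with a warning
        #   require-silently  -> host skipped, silently
        #   optional          -> host kept, with a warning
        #   optional-silently -> host kept, silently
        #   skip              -> env vars are never fetched, so hosts are always kept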

-    def _populate(self):
-        daemon_env = self.get_option('daemon_env')
+    def _populate(self) -> None:
+        if self.inventory is None:
+            raise AssertionError("Inventory must be there")
+
+        daemon_env: DaemonEnv = self.get_option("daemon_env")
+        filters = parse_filters(self.get_option("filters"))
         try:
-            for self.node in self._get_machine_names():
-                self.node_attrs = self._inspect_docker_machine_host(self.node)
-                if not self.node_attrs:
+            for node in self._get_machine_names():
+                node_attrs = self._inspect_docker_machine_host(node)
+                if not node_attrs:
                     continue

-                machine_name = self.node_attrs['Driver']['MachineName']
+                unsafe_node_attrs = make_unsafe(node_attrs)
+
+                machine_name = unsafe_node_attrs["Driver"]["MachineName"]
+                if not filter_host(self, machine_name, unsafe_node_attrs, filters):
+                    continue

                 # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                 # that could be used to set environment variables to influence a local Docker client:
-                if daemon_env == 'skip':
+                if daemon_env == "skip":
                     env_var_tuples = []
                 else:
                     env_var_tuples = self._get_docker_daemon_variables(machine_name)
@@ -223,52 +272,90 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

                 # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                 # this works around an issue seen with Google Compute Platform where the IP address was not available
                 # via the 'inspect' subcommand but was via the 'ip' subcommand.
-                if self.node_attrs['Driver']['IPAddress']:
-                    ip_addr = self.node_attrs['Driver']['IPAddress']
+                if unsafe_node_attrs["Driver"]["IPAddress"]:
+                    ip_addr = unsafe_node_attrs["Driver"]["IPAddress"]
                 else:
-                    ip_addr = self._ip_addr_docker_machine_host(self.node)
+                    ip_addr = self._ip_addr_docker_machine_host(node)

                 # set standard Ansible remote host connection settings to details captured from `docker-machine`
                 # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
-                self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
-                self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
-                self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
-                self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
+                self.inventory.set_variable(
+                    machine_name, "ansible_host", make_unsafe(ip_addr)
+                )
+                self.inventory.set_variable(
+                    machine_name, "ansible_port", unsafe_node_attrs["Driver"]["SSHPort"]
+                )
+                self.inventory.set_variable(
+                    machine_name, "ansible_user", unsafe_node_attrs["Driver"]["SSHUser"]
+                )
+                self.inventory.set_variable(
+                    machine_name,
+                    "ansible_ssh_private_key_file",
+                    unsafe_node_attrs["Driver"]["SSHKeyPath"],
+                )

                 # set variables based on Docker Machine tags
-                tags = self.node_attrs['Driver'].get('Tags') or ''
-                self.inventory.set_variable(machine_name, 'dm_tags', tags)
+                tags = unsafe_node_attrs["Driver"].get("Tags") or ""
+                self.inventory.set_variable(machine_name, "dm_tags", make_unsafe(tags))

                 # set variables based on Docker Machine env variables
                 for kv in env_var_tuples:
-                    self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
+                    self.inventory.set_variable(
+                        machine_name, f"dm_{kv[0]}", make_unsafe(kv[1])
+                    )

-                if self.get_option('verbose_output'):
-                    self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
+                if self.get_option("verbose_output"):
+                    self.inventory.set_variable(
+                        machine_name,
+                        "docker_machine_node_attributes",
+                        unsafe_node_attrs,
+                    )

                 # Use constructed if applicable
-                strict = self.get_option('strict')
+                strict = self.get_option("strict")

                 # Composed variables
-                self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
+                self._set_composite_vars(
+                    self.get_option("compose"),
+                    unsafe_node_attrs,
+                    machine_name,
+                    strict=strict,
+                )

                 # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
-                self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
+                self._add_host_to_composed_groups(
+                    self.get_option("groups"),
+                    unsafe_node_attrs,
+                    machine_name,
+                    strict=strict,
+                )

                 # Create groups based on variable values and add the corresponding hosts to it
-                self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
+                self._add_host_to_keyed_groups(
+                    self.get_option("keyed_groups"),
+                    unsafe_node_attrs,
+                    machine_name,
+                    strict=strict,
+                )

         except Exception as e:
-            raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
-                               to_native(e), orig_exc=e)
+            raise AnsibleError(
+                f"Unable to fetch hosts from Docker Machine, this was the original exception: {e}"
+            ) from e

-    def verify_file(self, path):
+    def verify_file(self, path: str) -> bool:
         """Return the possibility of a file being consumable by this plugin."""
-        return (
-            super(InventoryModule, self).verify_file(path) and
-            path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
+        return super().verify_file(path) and path.endswith(
+            ("docker_machine.yaml", "docker_machine.yml")
+        )

-    def parse(self, inventory, loader, path, cache=True):
-        super(InventoryModule, self).parse(inventory, loader, path, cache)
+    def parse(
+        self,
+        inventory: InventoryData,
+        loader: DataLoader,
+        path: str,
+        cache: bool = True,
+    ) -> None:
+        super().parse(inventory, loader, path, cache)
         self._read_config_data(path)
         self._populate()
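
For completeness, a minimal configuration exercising the two skipping-related options documented above (a sketch; values are illustrative):

# docker_machine.yml
plugin: community.docker.docker_machine
# keep hosts whose daemon env vars cannot be fetched, without warnings
daemon_env: optional-silently
# also include machines that are not in the Running state
running_required: false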

@@ -1,136 +1,139 @@
 # -*- coding: utf-8 -*-
 # Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
 # Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
+from __future__ import annotations

-DOCUMENTATION = '''
-name: docker_swarm
-author:
-  - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
-short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
-requirements:
-  - python >= 2.7
-  - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
-extends_documentation_fragment:
-  - constructed
-description:
-  - Reads inventories from the Docker swarm API.
-  - Uses a YAML configuration file docker_swarm.[yml|yaml].
-  - "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
-    I(managers) - all manager nodes; I(leader) - the swarm leader node;
-    I(nonleaders) - all nodes except the swarm leader."
-options:
-  plugin:
-    description: The name of this plugin, it should always be set to C(community.docker.docker_swarm)
-      for this plugin to recognize it as it's own.
-    type: str
-    required: true
-    choices: [ docker_swarm, community.docker.docker_swarm ]
-  docker_host:
-    description:
-      - Socket of a Docker swarm manager node (C(tcp), C(unix)).
-      - "Use C(unix://var/run/docker.sock) to connect via local socket."
-    type: str
-    required: true
-    aliases: [ docker_url ]
-  verbose_output:
-    description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS),
-      C(EngineVersion))
-    type: bool
-    default: yes
-  tls:
-    description: Connect using TLS without verifying the authenticity of the Docker host server.
-    type: bool
-    default: no
-  validate_certs:
-    description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
-      host server.
-    type: bool
-    default: no
-    aliases: [ tls_verify ]
-  client_key:
-    description: Path to the client's TLS key file.
-    type: path
-    aliases: [ tls_client_key, key_path ]
-  ca_cert:
-    description: Use a CA certificate when performing server verification by providing the path to a CA
-      certificate file.
-    type: path
-    aliases: [ tls_ca_cert, cacert_path ]
-  client_cert:
-    description: Path to the client's TLS certificate file.
-    type: path
-    aliases: [ tls_client_cert, cert_path ]
-  tls_hostname:
-    description: When verifying the authenticity of the Docker host server, provide the expected name of
-      the server.
-    type: str
-  ssl_version:
-    description: Provide a valid SSL version number. Default value determined by ssl.py module.
-    type: str
-  api_version:
-    description:
-      - The version of the Docker API running on the Docker Host.
-      - Defaults to the latest version of the API supported by docker-py.
-    type: str
-    aliases: [ docker_api_version ]
-  timeout:
-    description:
-      - The maximum amount of time in seconds to wait on a response from the API.
-      - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
-        will be used instead. If the environment variable is not set, the default value will be used.
-    type: int
-    default: 60
-    aliases: [ time_out ]
-  use_ssh_client:
-    description:
-      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
-      - Requires Docker SDK for Python 4.4.0 or newer.
-    type: bool
-    default: no
-    version_added: 1.5.0
-  include_host_uri:
-    description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
-      swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
-      modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
-      The port always defaults to C(2376).
-    type: bool
-    default: no
-  include_host_uri_port:
-    description: Override the detected port number included in I(ansible_host_uri)
-    type: int
-'''
+DOCUMENTATION = r"""
+name: docker_swarm
+author:
+  - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+short_description: Ansible dynamic inventory plugin for Docker swarm nodes
+requirements:
+  - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+extends_documentation_fragment:
+  - ansible.builtin.constructed
+  - community.library_inventory_filtering_v1.inventory_filter
+description:
+  - Reads inventories from the Docker swarm API.
+  - Uses a YAML configuration file that ends with V(docker_swarm.(yml|yaml\)).
+  - 'The plugin returns the following groups of swarm nodes: C(all) - all hosts; C(workers) - all worker nodes; C(managers) -
+    all manager nodes; C(leader) - the swarm leader node; C(nonleaders) - all nodes except the swarm leader.'
+notes:
+  - The configuration file must be a YAML file whose filename ends with V(docker_swarm.yml) or V(docker_swarm.yaml). Other
+    filenames will not be accepted.
+options:
+  plugin:
+    description: The name of this plugin, it should always be set to V(community.docker.docker_swarm) for this plugin to recognize
+      it as its own.
+    type: str
+    required: true
+    choices: [docker_swarm, community.docker.docker_swarm]
+  docker_host:
+    description:
+      - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+      - Use V(unix:///var/run/docker.sock) to connect through a local socket.
+    type: str
+    required: true
+    aliases: [docker_url]
+  verbose_output:
+    description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS), C(EngineVersion)).
+    type: bool
+    default: true
+  tls:
+    description: Connect using TLS without verifying the authenticity of the Docker host server.
+    type: bool
+    default: false
+  validate_certs:
+    description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker host server.
+    type: bool
+    default: false
+    aliases: [tls_verify]
+  client_key:
+    description: Path to the client's TLS key file.
+    type: path
+    aliases: [tls_client_key, key_path]
+  ca_path:
+    description:
+      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
+        as an alias and can still be used.
+    type: path
+    aliases: [ca_cert, tls_ca_cert, cacert_path]
+  client_cert:
+    description: Path to the client's TLS certificate file.
+    type: path
+    aliases: [tls_client_cert, cert_path]
+  tls_hostname:
+    description: When verifying the authenticity of the Docker host server, provide the expected name of the server.
+    type: str
+  api_version:
+    description:
+      - The version of the Docker API running on the Docker Host.
+      - Defaults to the latest version of the API supported by Docker SDK for Python.
+    type: str
+    aliases: [docker_api_version]
+  timeout:
+    description:
+      - The maximum amount of time in seconds to wait on a response from the API.
+      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
+        If the environment variable is not set, the default value will be used.
+    type: int
+    default: 60
+    aliases: [time_out]
+  use_ssh_client:
+    description:
+      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
+      - Requires Docker SDK for Python 4.4.0 or newer.
+    type: bool
+    default: false
+    version_added: 1.5.0
+  include_host_uri:
+    description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the swarm leader
+      in format of V(tcp://172.16.0.1:2376). This value may be used without additional modification as value of option O(docker_host)
+      in Docker Swarm modules when connecting through the API. The port always defaults to V(2376).
+    type: bool
+    default: false
+  include_host_uri_port:
+    description: Override the detected port number included in C(ansible_host_uri).
+    type: int
+  filters:
+    version_added: 3.5.0
+"""

-EXAMPLES = '''
+EXAMPLES = """
 ---
 # Minimal example using local docker
 plugin: community.docker.docker_swarm
-docker_host: unix://var/run/docker.sock
+docker_host: unix:///var/run/docker.sock

 ---
 # Minimal example using remote docker
 plugin: community.docker.docker_swarm
 docker_host: tcp://my-docker-host:2375

 ---
 # Example using remote docker with unverified TLS
 plugin: community.docker.docker_swarm
 docker_host: tcp://my-docker-host:2376
-tls: yes
+tls: true

 ---
 # Example using remote docker with verified TLS and client certificate verification
 plugin: community.docker.docker_swarm
 docker_host: tcp://my-docker-host:2376
-validate_certs: yes
-ca_cert: /somewhere/ca.pem
+validate_certs: true
+ca_path: /somewhere/ca.pem
 client_key: /somewhere/key.pem
 client_cert: /somewhere/cert.pem

 ---
 # Example using constructed features to create groups and set ansible_host
 plugin: community.docker.docker_swarm
 docker_host: tcp://my-docker-host:2375
-strict: False
+strict: false
 keyed_groups:
   # add for example x86_64 hosts to an arch_x86_64 group
   - prefix: arch
@@ -143,121 +146,195 @@ keyed_groups:
   # hint: labels containing special characters will be converted to safe names
   - key: 'Spec.Labels'
     prefix: label
-'''
+"""

+import typing as t
+
 from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.docker.plugins.module_utils.common import get_connect_params
-from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
 from ansible.parsing.utils.addresses import parse_address
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
+    filter_host,
+    parse_filters,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._common import (
+    get_connect_params,
+)
+from ansible_collections.community.docker.plugins.module_utils._util import (
+    update_tls_hostname,
+)
+from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
+    make_unsafe,
+)
+
+
+if t.TYPE_CHECKING:
+    from ansible.inventory.data import InventoryData
+    from ansible.parsing.dataloader import DataLoader


 try:
     import docker

     HAS_DOCKER = True
 except ImportError:
     HAS_DOCKER = False


 class InventoryModule(BaseInventoryPlugin, Constructable):
-    ''' Host inventory parser for ansible using Docker swarm as source. '''
+    """Host inventory parser for ansible using Docker swarm as source."""

-    NAME = 'community.docker.docker_swarm'
+    NAME = "community.docker.docker_swarm"

-    def _fail(self, msg):
+    def _fail(self, msg: str) -> t.NoReturn:
         raise AnsibleError(msg)

-    def _populate(self):
-        raw_params = dict(
-            docker_host=self.get_option('docker_host'),
-            tls=self.get_option('tls'),
-            tls_verify=self.get_option('validate_certs'),
-            key_path=self.get_option('client_key'),
-            cacert_path=self.get_option('ca_cert'),
-            cert_path=self.get_option('client_cert'),
-            tls_hostname=self.get_option('tls_hostname'),
-            api_version=self.get_option('api_version'),
-            timeout=self.get_option('timeout'),
-            ssl_version=self.get_option('ssl_version'),
-            use_ssh_client=self.get_option('use_ssh_client'),
-            debug=None,
-        )
+    def _populate(self) -> None:
+        if self.inventory is None:
+            raise AssertionError("Inventory must be there")
+
+        raw_params = {
+            "docker_host": self.get_option("docker_host"),
+            "tls": self.get_option("tls"),
+            "tls_verify": self.get_option("validate_certs"),
+            "key_path": self.get_option("client_key"),
+            "cacert_path": self.get_option("ca_path"),
+            "cert_path": self.get_option("client_cert"),
+            "tls_hostname": self.get_option("tls_hostname"),
+            "api_version": self.get_option("api_version"),
+            "timeout": self.get_option("timeout"),
+            "use_ssh_client": self.get_option("use_ssh_client"),
+            "debug": None,
+        }
         update_tls_hostname(raw_params)
         connect_params = get_connect_params(raw_params, fail_function=self._fail)
-        self.client = docker.DockerClient(**connect_params)
-        self.inventory.add_group('all')
-        self.inventory.add_group('manager')
-        self.inventory.add_group('worker')
-        self.inventory.add_group('leader')
-        self.inventory.add_group('nonleaders')
+        client = docker.DockerClient(**connect_params)
+        self.inventory.add_group("all")
+        self.inventory.add_group("manager")
+        self.inventory.add_group("worker")
+        self.inventory.add_group("leader")
+        self.inventory.add_group("nonleaders")

+        filters = parse_filters(self.get_option("filters"))
+
-        if self.get_option('include_host_uri'):
-            if self.get_option('include_host_uri_port'):
-                host_uri_port = str(self.get_option('include_host_uri_port'))
-            elif self.get_option('tls') or self.get_option('validate_certs'):
-                host_uri_port = '2376'
+        if self.get_option("include_host_uri"):
+            if self.get_option("include_host_uri_port"):
+                host_uri_port = str(self.get_option("include_host_uri_port"))
+            elif self.get_option("tls") or self.get_option("validate_certs"):
+                host_uri_port = "2376"
             else:
-                host_uri_port = '2375'
+                host_uri_port = "2375"
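            # A reading aid, not in the upstream source: 2376 is the conventional
            # port of TLS-protected Docker daemons and 2375 the plaintext one,
            # hence the fallback depends on the tls/validate_certs options.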

         try:
-            self.nodes = self.client.nodes.list()
-            for self.node in self.nodes:
-                self.node_attrs = self.client.nodes.get(self.node.id).attrs
-                self.inventory.add_host(self.node_attrs['ID'])
-                self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
-                self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
-                                            self.node_attrs['Status']['Addr'])
-                if self.get_option('include_host_uri'):
-                    self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
-                                                'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
-                if self.get_option('verbose_output'):
-                    self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
-                if 'ManagerStatus' in self.node_attrs:
-                    if self.node_attrs['ManagerStatus'].get('Leader'):
+            nodes = client.nodes.list()
+            for node in nodes:
+                node_attrs = client.nodes.get(node.id).attrs
+                unsafe_node_attrs = make_unsafe(node_attrs)
+                if not filter_host(
+                    self, unsafe_node_attrs["ID"], unsafe_node_attrs, filters
+                ):
+                    continue
+                self.inventory.add_host(unsafe_node_attrs["ID"])
+                self.inventory.add_host(
+                    unsafe_node_attrs["ID"], group=unsafe_node_attrs["Spec"]["Role"]
+                )
+                self.inventory.set_variable(
+                    unsafe_node_attrs["ID"],
+                    "ansible_host",
+                    unsafe_node_attrs["Status"]["Addr"],
+                )
+                if self.get_option("include_host_uri"):
+                    self.inventory.set_variable(
+                        unsafe_node_attrs["ID"],
+                        "ansible_host_uri",
+                        make_unsafe(
+                            "tcp://"
+                            + unsafe_node_attrs["Status"]["Addr"]
+                            + ":"
+                            + host_uri_port
+                        ),
+                    )
+                if self.get_option("verbose_output"):
+                    self.inventory.set_variable(
+                        unsafe_node_attrs["ID"],
+                        "docker_swarm_node_attributes",
+                        unsafe_node_attrs,
+                    )
+                if "ManagerStatus" in unsafe_node_attrs:
+                    if unsafe_node_attrs["ManagerStatus"].get("Leader"):
                         # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                         # Check moby/moby#35437 for details
-                        swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
-                            self.node_attrs['Status']['Addr']
-                        if self.get_option('include_host_uri'):
-                            self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
-                                                        'tcp://' + swarm_leader_ip + ':' + host_uri_port)
-                        self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
-                        self.inventory.add_host(self.node_attrs['ID'], group='leader')
+                        swarm_leader_ip = (
+                            parse_address(node_attrs["ManagerStatus"]["Addr"])[0]
+                            or unsafe_node_attrs["Status"]["Addr"]
+                        )
+                        if self.get_option("include_host_uri"):
+                            self.inventory.set_variable(
+                                unsafe_node_attrs["ID"],
+                                "ansible_host_uri",
+                                make_unsafe(
+                                    "tcp://" + swarm_leader_ip + ":" + host_uri_port
+                                ),
+                            )
+                        self.inventory.set_variable(
+                            unsafe_node_attrs["ID"],
+                            "ansible_host",
+                            make_unsafe(swarm_leader_ip),
+                        )
+                        self.inventory.add_host(unsafe_node_attrs["ID"], group="leader")
                     else:
-                        self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+                        self.inventory.add_host(
+                            unsafe_node_attrs["ID"], group="nonleaders"
+                        )
                 else:
-                    self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+                    self.inventory.add_host(unsafe_node_attrs["ID"], group="nonleaders")
                 # Use constructed if applicable
-                strict = self.get_option('strict')
+                strict = self.get_option("strict")
                 # Composed variables
-                self._set_composite_vars(self.get_option('compose'),
-                                         self.node_attrs,
-                                         self.node_attrs['ID'],
-                                         strict=strict)
+                self._set_composite_vars(
+                    self.get_option("compose"),
+                    unsafe_node_attrs,
+                    unsafe_node_attrs["ID"],
+                    strict=strict,
+                )
                 # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(self.get_option('groups'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
self._add_host_to_composed_groups(
|
||||
self.get_option("groups"),
|
||||
unsafe_node_attrs,
|
||||
unsafe_node_attrs["ID"],
|
||||
strict=strict,
|
||||
)
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
self._add_host_to_keyed_groups(
|
||||
self.get_option("keyed_groups"),
|
||||
unsafe_node_attrs,
|
||||
unsafe_node_attrs["ID"],
|
||||
strict=strict,
|
||||
)
|
||||
except Exception as e:
|
||||
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
|
||||
to_native(e))
|
||||
raise AnsibleError(
|
||||
f"Unable to fetch hosts from Docker swarm API, this was the original exception: {e}"
|
||||
) from e
|
||||
|
||||
-    def verify_file(self, path):
+    def verify_file(self, path: str) -> bool:
         """Return the possibility of a file being consumable by this plugin."""
-        return (
-            super(InventoryModule, self).verify_file(path) and
-            path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
+        return super().verify_file(path) and path.endswith(
+            ("docker_swarm.yaml", "docker_swarm.yml")
+        )

-    def parse(self, inventory, loader, path, cache=True):
+    def parse(
+        self,
+        inventory: InventoryData,
+        loader: DataLoader,
+        path: str,
+        cache: bool = True,
+    ) -> None:
         if not HAS_DOCKER:
-            raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
-                               'https://github.com/docker/docker-py.')
-        super(InventoryModule, self).parse(inventory, loader, path, cache)
+            raise AnsibleError(
+                "The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: "
+                "https://github.com/docker/docker-py."
+            )
+        super().parse(inventory, loader, path, cache)
         self._read_config_data(path)
         self._populate()
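A minimal sketch of driving this plugin outside a playbook through ansible-core's inventory API (the docker_swarm.yml source file and a reachable swarm manager are assumed, not part of this diff):

    from ansible.inventory.manager import InventoryManager
    from ansible.parsing.dataloader import DataLoader

    # verify_file() above only accepts sources named docker_swarm.yaml / docker_swarm.yml
    inventory = InventoryManager(loader=DataLoader(), sources=["docker_swarm.yml"])
    # _populate() creates the groups: all, manager, worker, leader, nonleaders
    for host in inventory.groups["manager"].get_hosts():
        print(host.name, host.vars.get("ansible_host"))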
103  plugins/module_utils/_api/_import_helper.py  Normal file
@@ -0,0 +1,103 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import traceback
import typing as t


REQUESTS_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    from requests import Session  # noqa: F401, pylint: disable=unused-import
    from requests.adapters import (  # noqa: F401, pylint: disable=unused-import
        HTTPAdapter,
    )
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        HTTPError,
        InvalidSchema,
    )
except ImportError:
    REQUESTS_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name

    class Session:  # type: ignore
        __attrs__: list[t.Never] = []

    class HTTPAdapter:  # type: ignore
        __attrs__: list[t.Never] = []

    class HTTPError(Exception):  # type: ignore
        pass

    class InvalidSchema(Exception):  # type: ignore
        pass

else:
    REQUESTS_IMPORT_ERROR = None  # pylint: disable=invalid-name


URLLIB3_IMPORT_ERROR: str | None = None  # pylint: disable=invalid-name
try:
    from requests.packages import urllib3  # pylint: disable=unused-import

    from requests.packages.urllib3 import (  # type: ignore # pylint: disable=unused-import # isort: skip
        connection as urllib3_connection,
    )
except ImportError:
    try:
        import urllib3  # pylint: disable=unused-import
        from urllib3 import (
            connection as urllib3_connection,  # pylint: disable=unused-import
        )
    except ImportError:
        URLLIB3_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name

        class _HTTPConnectionPool:
            pass

        class _HTTPConnection:
            pass

        class FakeURLLIB3:
            def __init__(self) -> None:
                self._collections = self
                self.poolmanager = self
                self.connection = self
                self.connectionpool = self

                self.RecentlyUsedContainer = object()  # pylint: disable=invalid-name
                self.PoolManager = object()  # pylint: disable=invalid-name
                self.match_hostname = object()
                self.HTTPConnectionPool = (  # pylint: disable=invalid-name
                    _HTTPConnectionPool
                )

        class FakeURLLIB3Connection:
            def __init__(self) -> None:
                self.HTTPConnection = _HTTPConnection  # pylint: disable=invalid-name

        urllib3 = FakeURLLIB3()
        urllib3_connection = FakeURLLIB3Connection()


def fail_on_missing_imports() -> None:
    if REQUESTS_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException  # pylint: disable=cyclic-import

        raise MissingRequirementException(
            "You have to install requests", "requests", REQUESTS_IMPORT_ERROR
        )
    if URLLIB3_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException  # pylint: disable=cyclic-import

        raise MissingRequirementException(
            "You have to install urllib3", "urllib3", URLLIB3_IMPORT_ERROR
        )
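A short usage sketch for the guard above (the call site is assumed, not shown in this diff; the util is private to the collection, per the notice in its header): consumers import the vendored names unconditionally and call fail_on_missing_imports() before first use, so a missing soft dependency surfaces as MissingRequirementException carrying the recorded traceback instead of a bare ImportError at import time:

    from ansible_collections.community.docker.plugins.module_utils._api._import_helper import (
        Session,
        fail_on_missing_imports,
    )

    def make_session() -> Session:
        # Raises MissingRequirementException if requests/urllib3 were
        # unavailable when _import_helper was first imported.
        fail_on_missing_imports()
        return Session()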
1050  plugins/module_utils/_api/api/client.py  Normal file
(File diff suppressed because it is too large)
407  plugins/module_utils/_api/auth.py  Normal file
@@ -0,0 +1,407 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import base64
import json
import logging
import typing as t

from . import errors
from .credentials.errors import CredentialsNotFound, StoreError
from .credentials.store import Store
from .utils import config


if t.TYPE_CHECKING:
    from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
        APIClient,
    )


INDEX_NAME = "docker.io"
INDEX_URL = f"https://index.{INDEX_NAME}/v1/"
TOKEN_USERNAME = "<token>"

log = logging.getLogger(__name__)


def resolve_repository_name(repo_name: str) -> tuple[str, str]:
    if "://" in repo_name:
        raise errors.InvalidRepository(
            f"Repository name cannot contain a scheme ({repo_name})"
        )

    index_name, remote_name = split_repo_name(repo_name)
    if index_name[0] == "-" or index_name[-1] == "-":
        raise errors.InvalidRepository(
            f"Invalid index name ({index_name}). Cannot begin or end with a hyphen."
        )
    return resolve_index_name(index_name), remote_name
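# Illustrative behaviour of the name helpers in this file (inputs are made up):
#   resolve_repository_name("ubuntu")                   -> ("docker.io", "ubuntu")
#   resolve_repository_name("user/app")                 -> ("docker.io", "user/app")
#   resolve_repository_name("registry.example.com/app") -> ("registry.example.com", "app")
#   split_repo_name("localhost:5000/app")               -> ("localhost:5000", "app")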
def resolve_index_name(index_name: str) -> str:
    index_name = convert_to_hostname(index_name)
    if index_name == "index." + INDEX_NAME:
        index_name = INDEX_NAME
    return index_name


def get_config_header(client: APIClient, registry: str) -> bytes | None:
    log.debug("Looking for auth config")
    if not client._auth_configs or client._auth_configs.is_empty:
        log.debug("No auth config in memory - loading from filesystem")
        client._auth_configs = load_config(credstore_env=client.credstore_env)
    authcfg = resolve_authconfig(
        client._auth_configs, registry, credstore_env=client.credstore_env
    )
    # Do not fail here if no authentication exists for this
    # specific registry as we can have a readonly pull. Just
    # put the header if we can.
    if authcfg:
        log.debug("Found auth config")
        # auth_config needs to be a dict in the format used by
        # auth.py username , password, serveraddress, email
        return encode_header(authcfg)
    log.debug("No auth config found")
    return None


def split_repo_name(repo_name: str) -> tuple[str, str]:
    parts = repo_name.split("/", 1)
    if len(parts) == 1 or (
        "." not in parts[0] and ":" not in parts[0] and parts[0] != "localhost"
    ):
        # This is a docker index repo (ex: username/foobar or ubuntu)
        return INDEX_NAME, repo_name
    return tuple(parts)  # type: ignore


def get_credential_store(
    authconfig: dict[str, t.Any] | AuthConfig, registry: str
) -> str | None:
    if not isinstance(authconfig, AuthConfig):
        authconfig = AuthConfig(authconfig)
    return authconfig.get_credential_store(registry)


class AuthConfig(dict):
    def __init__(
        self, dct: dict[str, t.Any], credstore_env: dict[str, str] | None = None
    ):
        if "auths" not in dct:
            dct["auths"] = {}
        self.update(dct)
        self._credstore_env = credstore_env
        self._stores: dict[str, Store] = {}

    @classmethod
    def parse_auth(
        cls, entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
    ) -> dict[str, dict[str, t.Any]]:
        """
        Parses authentication entries

        Args:
          entries:        Dict of authentication entries.
          raise_on_error: If set to true, an invalid format will raise
                          InvalidConfigFile

        Returns:
          Authentication registry.
        """

        conf: dict[str, dict[str, t.Any]] = {}
        for registry, entry in entries.items():
            if not isinstance(entry, dict):
                log.debug("Config entry for key %s is not auth config", registry)  # type: ignore
                # We sometimes fall back to parsing the whole config as if it
                # was the auth config by itself, for legacy purposes. In that
                # case, we fail silently and return an empty conf if any of the
                # keys is not formatted properly.
                if raise_on_error:
                    raise errors.InvalidConfigFile(
                        f"Invalid configuration for registry {registry}"
                    )
                return {}
            if "identitytoken" in entry:
                log.debug("Found an IdentityToken entry for registry %s", registry)
                conf[registry] = {"IdentityToken": entry["identitytoken"]}
                continue  # Other values are irrelevant if we have a token

            if "auth" not in entry:
                # Starting with engine v1.11 (API 1.23), an empty dictionary is
                # a valid value in the auths config.
                # https://github.com/docker/compose/issues/3265
                log.debug(
                    "Auth data for %s is absent. Client might be using a credentials store instead.",
                    registry,
                )
                conf[registry] = {}
                continue

            username, password = decode_auth(entry["auth"])
            log.debug(
                "Found entry (registry=%s, username=%s)", repr(registry), repr(username)
            )

            conf[registry] = {
                "username": username,
                "password": password,
                "email": entry.get("email"),
                "serveraddress": registry,
            }
        return conf
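    # Illustrative input/output for parse_auth (registry is made up; the
    # base64 value decodes to "user:s3cret"):
    #   AuthConfig.parse_auth({"registry.example.com": {"auth": "dXNlcjpzM2NyZXQ="}})
    #   -> {"registry.example.com": {"username": "user", "password": "s3cret",
    #       "email": None, "serveraddress": "registry.example.com"}}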
    @classmethod
    def load_config(
        cls,
        config_path: str | None,
        config_dict: dict[str, t.Any] | None,
        credstore_env: dict[str, str] | None = None,
    ) -> t.Self:
        """
        Loads authentication data from a Docker configuration file in the given
        root directory or if config_path is passed use given path.
        Lookup priority:
            explicit config_path parameter > DOCKER_CONFIG environment
            variable > ~/.docker/config.json > ~/.dockercfg
        """

        if not config_dict:
            config_file = config.find_config_file(config_path)

            if not config_file:
                return cls({}, credstore_env)
            try:
                with open(config_file, "rt", encoding="utf-8") as f:
                    config_dict = json.load(f)
            except (IOError, KeyError, ValueError) as e:
                # Likely missing new Docker config file or it is in an
                # unknown format, continue to attempt to read old location
                # and format.
                log.debug(e)
                return cls(_load_legacy_config(config_file), credstore_env)

        res = {}
        if config_dict.get("auths"):
            log.debug("Found 'auths' section")
            res.update(
                {"auths": cls.parse_auth(config_dict.pop("auths"), raise_on_error=True)}
            )
        if config_dict.get("credsStore"):
            log.debug("Found 'credsStore' section")
            res.update({"credsStore": config_dict.pop("credsStore")})
        if config_dict.get("credHelpers"):
            log.debug("Found 'credHelpers' section")
            res.update({"credHelpers": config_dict.pop("credHelpers")})
        if res:
            return cls(res, credstore_env)

        log.debug(
            "Could not find auth-related section; attempting to interpret "
            "as auth-only file"
        )
        return cls({"auths": cls.parse_auth(config_dict)}, credstore_env)
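    # For reference, the shape of a config.json that load_config accepts
    # (values are made up; "credsStore" and "credHelpers" are optional):
    #   {
    #       "auths": {"registry.example.com": {"auth": "dXNlcjpzM2NyZXQ="}},
    #       "credsStore": "secretservice",
    #       "credHelpers": {"registry.example.com": "secretservice"}
    #   }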
    @property
    def auths(self) -> dict[str, dict[str, t.Any]]:
        return self.get("auths", {})

    @property
    def creds_store(self) -> str | None:
        return self.get("credsStore", None)

    @property
    def cred_helpers(self) -> dict[str, t.Any]:
        return self.get("credHelpers", {})

    @property
    def is_empty(self) -> bool:
        return not self.auths and not self.creds_store and not self.cred_helpers

    def resolve_authconfig(
        self, registry: str | None = None
    ) -> dict[str, t.Any] | None:
        """
        Returns the authentication data from the given auth configuration for a
        specific registry. As with the Docker client, legacy entries in the
        config with full URLs are stripped down to hostnames before checking
        for a match. Returns None if no match was found.
        """

        if self.creds_store or self.cred_helpers:
            store_name = self.get_credential_store(registry)
            if store_name is not None:
                log.debug('Using credentials store "%s"', store_name)
                cfg = self._resolve_authconfig_credstore(registry, store_name)
                if cfg is not None:
                    return cfg
                log.debug("No entry in credstore - fetching from auth dict")

        # Default to the public index server
        registry = resolve_index_name(registry) if registry else INDEX_NAME
        log.debug("Looking for auth entry for %s", repr(registry))

        if registry in self.auths:
            log.debug("Found %s", repr(registry))
            return self.auths[registry]

        for key, conf in self.auths.items():
            if resolve_index_name(key) == registry:
                log.debug("Found %s", repr(key))
                return conf

        log.debug("No entry found")
        return None

    def _resolve_authconfig_credstore(
        self, registry: str | None, credstore_name: str
    ) -> dict[str, t.Any] | None:
        if not registry or registry == INDEX_NAME:
            # The ecosystem is a little schizophrenic with index.docker.io VS
            # docker.io - in that case, it seems the full URL is necessary.
            registry = INDEX_URL
        log.debug("Looking for auth entry for %s", repr(registry))
        store = self._get_store_instance(credstore_name)
        try:
            data = store.get(registry)
            res = {
                "ServerAddress": registry,
            }
            if data["Username"] == TOKEN_USERNAME:
                res["IdentityToken"] = data["Secret"]
            else:
                res.update(
                    {
                        "Username": data["Username"],
                        "Password": data["Secret"],
                    }
                )
            return res
        except CredentialsNotFound:
            log.debug("No entry found")
            return None
        except StoreError as e:
            raise errors.DockerException(f"Credentials store error: {e}") from e

    def _get_store_instance(self, name: str) -> Store:
        if name not in self._stores:
            self._stores[name] = Store(name, environment=self._credstore_env)
        return self._stores[name]

    def get_credential_store(self, registry: str | None) -> str | None:
        if not registry or registry == INDEX_NAME:
            registry = INDEX_URL

        return self.cred_helpers.get(registry) or self.creds_store

    def get_all_credentials(self) -> dict[str, dict[str, t.Any] | None]:
        auth_data: dict[str, dict[str, t.Any] | None] = self.auths.copy()  # type: ignore
        if self.creds_store:
            # Retrieve all credentials from the default store
            store = self._get_store_instance(self.creds_store)
            for k in store.list():
                auth_data[k] = self._resolve_authconfig_credstore(k, self.creds_store)
                auth_data[convert_to_hostname(k)] = auth_data[k]

        # credHelpers entries take priority over all others
        for reg, store_name in self.cred_helpers.items():
            auth_data[reg] = self._resolve_authconfig_credstore(reg, store_name)
            auth_data[convert_to_hostname(reg)] = auth_data[reg]

        return auth_data

    def add_auth(self, reg: str, data: dict[str, t.Any]) -> None:
        self["auths"][reg] = data


def resolve_authconfig(
    authconfig: AuthConfig | dict[str, t.Any],
    registry: str | None = None,
    credstore_env: dict[str, str] | None = None,
) -> dict[str, t.Any] | None:
    if not isinstance(authconfig, AuthConfig):
        authconfig = AuthConfig(authconfig, credstore_env)
    return authconfig.resolve_authconfig(registry)


def convert_to_hostname(url: str) -> str:
    return url.replace("http://", "").replace("https://", "").split("/", 1)[0]
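# Illustration (URL is made up): convert_to_hostname strips scheme and path:
#   convert_to_hostname("https://registry.example.com/v2/") -> "registry.example.com"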
def decode_auth(auth: str | bytes) -> tuple[str, str]:
    if isinstance(auth, str):
        auth = auth.encode("ascii")
    s = base64.b64decode(auth)
    login, pwd = s.split(b":", 1)
    return login.decode("utf8"), pwd.decode("utf8")


def encode_header(auth: dict[str, t.Any]) -> bytes:
    auth_json = json.dumps(auth).encode("ascii")
    return base64.urlsafe_b64encode(auth_json)
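# Round-trip illustration for the two helpers above (credentials are made up):
#   base64.b64encode(b"user:s3cret") == b"dXNlcjpzM2NyZXQ="
#   decode_auth("dXNlcjpzM2NyZXQ=") -> ("user", "s3cret")
#   encode_header({"username": "user"}) -> URL-safe base64 of the JSON document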
def parse_auth(
    entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
) -> dict[str, dict[str, t.Any]]:
    """
    Parses authentication entries

    Args:
      entries:        Dict of authentication entries.
      raise_on_error: If set to true, an invalid format will raise
                      InvalidConfigFile

    Returns:
      Authentication registry.
    """

    return AuthConfig.parse_auth(entries, raise_on_error)


def load_config(
    config_path: str | None = None,
    config_dict: dict[str, t.Any] | None = None,
    credstore_env: dict[str, str] | None = None,
) -> AuthConfig:
    return AuthConfig.load_config(config_path, config_dict, credstore_env)


def _load_legacy_config(config_file: str) -> dict[str, dict[str, t.Any]]:
    log.debug("Attempting to parse legacy auth file format")
    try:
        data = []
        with open(config_file, "rt", encoding="utf-8") as f:
            for line in f.readlines():
                data.append(line.strip().split(" = ")[1])
            if len(data) < 2:
                # Not enough data
                raise errors.InvalidConfigFile("Invalid or empty configuration file!")

        username, password = decode_auth(data[0])
        return {
            "auths": {
                INDEX_NAME: {
                    "username": username,
                    "password": password,
                    "email": data[1],
                    "serveraddress": INDEX_URL,
                }
            }
        }
    except Exception as e:  # pylint: disable=broad-exception-caught
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}
41  plugins/module_utils/_api/constants.py  Normal file
@@ -0,0 +1,41 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import sys


MINIMUM_DOCKER_API_VERSION = "1.21"
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = ["memory", "memswap", "cpushares", "cpusetcpus"]

DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = "npipe:////./pipe/docker_engine"

BYTE_UNITS = {"b": 1, "k": 1024, "m": 1024 * 1024, "g": 1024 * 1024 * 1024}

IS_WINDOWS_PLATFORM = sys.platform == "win32"
WINDOWS_LONGPATH_PREFIX = "\\\\?\\"

DEFAULT_USER_AGENT = "ansible-community.docker"
DEFAULT_NUM_POOLS = 25

# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9

DEFAULT_MAX_POOL_SIZE = 10

DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
254  plugins/module_utils/_api/context/api.py  Normal file
@@ -0,0 +1,254 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import os
import typing as t

from .. import errors
from .config import (
    METAFILE,
    get_current_context_name,
    get_meta_dir,
    write_context_name_to_docker_config,
)
from .context import Context


if t.TYPE_CHECKING:
    from ..tls import TLSConfig


def create_default_context() -> Context:
    host = None
    if os.environ.get("DOCKER_HOST"):
        host = os.environ.get("DOCKER_HOST")
    return Context(
        "default", "swarm", host, description="Current DOCKER_HOST based configuration"
    )


class ContextAPI:
    """Context API.
    Contains methods for context management:
    create, list, remove, get, inspect.
    """

    DEFAULT_CONTEXT = None

    @classmethod
    def get_default_context(cls) -> Context:
        context = cls.DEFAULT_CONTEXT
        if context is None:
            context = create_default_context()
            cls.DEFAULT_CONTEXT = context
        return context

    @classmethod
    def create_context(
        cls,
        name: str,
        orchestrator: str | None = None,
        host: str | None = None,
        tls_cfg: TLSConfig | None = None,
        default_namespace: str | None = None,
        skip_tls_verify: bool = False,
    ) -> Context:
        """Creates a new context.
        Returns:
            (Context): a Context object.
        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextAlreadyExists`
                If a context with the name already exists.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:

        >>> from docker.context import ContextAPI
        >>> ctx = ContextAPI.create_context(name='test')
        >>> print(ctx.Metadata)
        {
            "Name": "test",
            "Metadata": {},
            "Endpoints": {
                "docker": {
                    "Host": "unix:///var/run/docker.sock",
                    "SkipTLSVerify": false
                }
            }
        }
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException('"default" is a reserved context name')
        ctx = Context.load_context(name)
        if ctx:
            raise errors.ContextAlreadyExists(name)
        endpoint = "docker"
        if orchestrator and orchestrator != "swarm":
            endpoint = orchestrator
        ctx = Context(name, orchestrator)
        ctx.set_endpoint(
            endpoint,
            host,
            tls_cfg,
            skip_tls_verify=skip_tls_verify,
            def_namespace=default_namespace,
        )
        ctx.save()
        return ctx

    @classmethod
    def get_context(cls, name: str | None = None) -> Context | None:
        """Retrieves a context object.
        Args:
            name (str): The name of the context

        Example:

        >>> from docker.context import ContextAPI
        >>> ctx = ContextAPI.get_context(name='test')
        >>> print(ctx.Metadata)
        {
            "Name": "test",
            "Metadata": {},
            "Endpoints": {
                "docker": {
                    "Host": "unix:///var/run/docker.sock",
                    "SkipTLSVerify": false
                }
            }
        }
        """
        if not name:
            name = get_current_context_name()
        if name == "default":
            return cls.get_default_context()
        return Context.load_context(name)

    @classmethod
    def contexts(cls) -> list[Context]:
        """Context list.
        Returns:
            (Context): List of context objects.
        Raises:
            :py:class:`docker.errors.APIError`
                If something goes wrong.
        """
        names = []
        for dirname, dummy, fnames in os.walk(get_meta_dir()):
            for filename in fnames:
                if filename == METAFILE:
                    filepath = os.path.join(dirname, filename)
                    try:
                        with open(filepath, "rt", encoding="utf-8") as f:
                            data = json.load(f)
                            name = data["Name"]
                            if name == "default":
                                raise ValueError('"default" is a reserved context name')
                            names.append(name)
                    except Exception as e:
                        raise errors.ContextException(
                            f"Failed to load metafile {filepath}: {e}"
                        ) from e

        contexts = [cls.get_default_context()]
        for name in names:
            context = Context.load_context(name)
            if not context:
                raise errors.ContextException(f"Context {name} cannot be found")
            contexts.append(context)
        return contexts

    @classmethod
    def get_current_context(cls) -> Context | None:
        """Get current context.
        Returns:
            (Context): current context object.
        """
        return cls.get_context()

    @classmethod
    def set_current_context(cls, name: str = "default") -> None:
        ctx = cls.get_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        err = write_context_name_to_docker_config(name)
        if err:
            raise errors.ContextException(f"Failed to set current context: {err}")

    @classmethod
    def remove_context(cls, name: str) -> None:
        """Remove a context. Similar to the ``docker context rm`` command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:

        >>> from docker.context import ContextAPI
        >>> ContextAPI.remove_context(name='test')
        >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException('context "default" cannot be removed')
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)
        if name == get_current_context_name():
            write_context_name_to_docker_config(None)
        ctx.remove()
    @classmethod
    def inspect_context(cls, name: str = "default") -> dict[str, t.Any]:
        """Inspect a context. Similar to the ``docker context inspect`` command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.

        Example:

        >>> from docker.context import ContextAPI
        >>> ContextAPI.inspect_context(name='test')
        >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            return cls.get_default_context()()
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        return ctx()
108  plugins/module_utils/_api/context/config.py  Normal file
@@ -0,0 +1,108 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import hashlib
import json
import os

from ..constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from ..utils.config import find_config_file, get_default_config_file
from ..utils.utils import parse_host


METAFILE = "meta.json"


def get_current_context_name_with_source() -> tuple[str, str]:
    if os.environ.get("DOCKER_HOST"):
        return "default", "DOCKER_HOST environment variable set"
    if os.environ.get("DOCKER_CONTEXT"):
        return os.environ["DOCKER_CONTEXT"], "DOCKER_CONTEXT environment variable set"
    docker_cfg_path = find_config_file()
    if docker_cfg_path:
        try:
            with open(docker_cfg_path, "rt", encoding="utf-8") as f:
                return (
                    json.load(f).get("currentContext", "default"),
                    f"configuration file {docker_cfg_path}",
                )
        except Exception:  # pylint: disable=broad-exception-caught
            pass
    return "default", "fallback value"


def get_current_context_name() -> str:
    return get_current_context_name_with_source()[0]


def write_context_name_to_docker_config(name: str | None = None) -> Exception | None:
    if name == "default":
        name = None
    docker_cfg_path = find_config_file()
    config = {}
    if docker_cfg_path:
        try:
            with open(docker_cfg_path, "rt", encoding="utf-8") as f:
                config = json.load(f)
        except Exception as e:  # pylint: disable=broad-exception-caught
            return e
    current_context = config.get("currentContext", None)
    if current_context and not name:
        del config["currentContext"]
    elif name:
        config["currentContext"] = name
    else:
        return None
    if not docker_cfg_path:
        docker_cfg_path = get_default_config_file()
    try:
        with open(docker_cfg_path, "wt", encoding="utf-8") as f:
            json.dump(config, f, indent=4)
        return None
    except Exception as e:  # pylint: disable=broad-exception-caught
        return e


def get_context_id(name: str) -> str:
    return hashlib.sha256(name.encode("utf-8")).hexdigest()


def get_context_dir() -> str:
    docker_cfg_path = find_config_file() or get_default_config_file()
    return os.path.join(os.path.dirname(docker_cfg_path), "contexts")


def get_meta_dir(name: str | None = None) -> str:
    meta_dir = os.path.join(get_context_dir(), "meta")
    if name:
        return os.path.join(meta_dir, get_context_id(name))
    return meta_dir


def get_meta_file(name: str) -> str:
    return os.path.join(get_meta_dir(name), METAFILE)


def get_tls_dir(name: str | None = None, endpoint: str = "") -> str:
    context_dir = get_context_dir()
    if name:
        return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
    return os.path.join(context_dir, "tls")


def get_context_host(path: str | None = None, tls: bool = False) -> str:
    host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
    if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
        # remove http+ from default docker socket url
        host = host[5:]
    return host
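Put together, the helpers above give each context a stable on-disk location derived from the config directory. A sketch assuming the default ~/.docker config dir and a made-up context name ("test"; sha256("test") starts with 9f86d081):

    get_context_id("test")         # sha256 hex digest of the name: "9f86d081..."
    get_meta_file("test")          # ~/.docker/contexts/meta/9f86d081.../meta.json
    get_tls_dir("test", "docker")  # ~/.docker/contexts/tls/9f86d081.../docker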
287  plugins/module_utils/_api/context/context.py  Normal file
@@ -0,0 +1,287 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import os
import typing as t
from shutil import copyfile, rmtree

from ..errors import ContextException
from ..tls import TLSConfig
from .config import (
    get_context_host,
    get_meta_dir,
    get_meta_file,
    get_tls_dir,
)


IN_MEMORY = "IN MEMORY"


class Context:
    """A context."""

    def __init__(
        self,
        name: str,
        orchestrator: str | None = None,
        host: str | None = None,
        endpoints: dict[str, dict[str, t.Any]] | None = None,
        skip_tls_verify: bool = False,
        tls: bool = False,
        description: str | None = None,
    ) -> None:
        if not name:
            raise ValueError("Name not provided")
        self.name = name
        self.context_type = None
        self.orchestrator = orchestrator
        self.endpoints = {}
        self.tls_cfg: dict[str, TLSConfig] = {}
        self.meta_path = IN_MEMORY
        self.tls_path = IN_MEMORY
        self.description = description

        if not endpoints:
            # set default docker endpoint if no endpoint is set
            default_endpoint = (
                "docker"
                if (not orchestrator or orchestrator == "swarm")
                else orchestrator
            )

            self.endpoints = {
                default_endpoint: {
                    "Host": get_context_host(host, skip_tls_verify or tls),
                    "SkipTLSVerify": skip_tls_verify,
                }
            }
            return

        # check docker endpoints
        for k, v in endpoints.items():
            if not isinstance(v, dict):
                # unknown format
                raise ContextException(
                    f"Unknown endpoint format for context {name}: {v}",
                )

            self.endpoints[k] = v
            if k != "docker":
                continue

            self.endpoints[k]["Host"] = v.get(
                "Host", get_context_host(host, skip_tls_verify or tls)
            )
            self.endpoints[k]["SkipTLSVerify"] = bool(
                v.get("SkipTLSVerify", skip_tls_verify)
            )

    def set_endpoint(
        self,
        name: str = "docker",
        host: str | None = None,
        tls_cfg: TLSConfig | None = None,
        skip_tls_verify: bool = False,
        def_namespace: str | None = None,
    ) -> None:
        self.endpoints[name] = {
            "Host": get_context_host(host, not skip_tls_verify or tls_cfg is not None),
            "SkipTLSVerify": skip_tls_verify,
        }
        if def_namespace:
            self.endpoints[name]["DefaultNamespace"] = def_namespace

        if tls_cfg:
            self.tls_cfg[name] = tls_cfg

    def inspect(self) -> dict[str, t.Any]:
        return self()

    @classmethod
    def load_context(cls, name: str) -> t.Self | None:
        meta = Context._load_meta(name)
        if meta:
            instance = cls(
                meta["Name"],
                orchestrator=meta["Metadata"].get("StackOrchestrator", None),
                endpoints=meta.get("Endpoints", None),
                description=meta["Metadata"].get("Description"),
            )
            instance.context_type = meta["Metadata"].get("Type", None)
            instance._load_certs()
            instance.meta_path = get_meta_dir(name)
            return instance
        return None

    @classmethod
    def _load_meta(cls, name: str) -> dict[str, t.Any] | None:
        meta_file = get_meta_file(name)
        if not os.path.isfile(meta_file):
            return None

        metadata: dict[str, t.Any] = {}
        try:
            with open(meta_file, "rt", encoding="utf-8") as f:
                metadata = json.load(f)
        except (OSError, KeyError, ValueError) as e:
            # unknown format
            raise RuntimeError(
                f"Detected corrupted meta file for context {name} : {e}"
            ) from e

        # for docker endpoints, set defaults for
        # Host and SkipTLSVerify fields
        for k, v in metadata["Endpoints"].items():
            if k != "docker":
                continue
            metadata["Endpoints"][k]["Host"] = v.get(
                "Host", get_context_host(None, False)
            )
            metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
                v.get("SkipTLSVerify", True)
            )

        return metadata

    def _load_certs(self) -> None:
        certs = {}
        tls_dir = get_tls_dir(self.name)
        for endpoint in self.endpoints:
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                continue
            ca_cert = None
            cert = None
            key = None
            for filename in os.listdir(os.path.join(tls_dir, endpoint)):
                if filename.startswith("ca"):
                    ca_cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("cert"):
                    cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("key"):
                    key = os.path.join(tls_dir, endpoint, filename)
            if all([cert, key]) or ca_cert:
                verify = None
                if endpoint == "docker" and not self.endpoints["docker"].get(
                    "SkipTLSVerify", False
                ):
                    verify = True
                certs[endpoint] = TLSConfig(
                    client_cert=(cert, key) if cert and key else None,
                    ca_cert=ca_cert,
                    verify=verify,
                )
        self.tls_cfg = certs
        self.tls_path = tls_dir

    def save(self) -> None:
        meta_dir = get_meta_dir(self.name)
        if not os.path.isdir(meta_dir):
            os.makedirs(meta_dir)
        with open(get_meta_file(self.name), "wt", encoding="utf-8") as f:
            f.write(json.dumps(self.Metadata))

        tls_dir = get_tls_dir(self.name)
        for endpoint, tls in self.tls_cfg.items():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                os.makedirs(os.path.join(tls_dir, endpoint))

            ca_file = tls.ca_cert
            if ca_file:
                copyfile(
                    ca_file, os.path.join(tls_dir, endpoint, os.path.basename(ca_file))
                )

            if tls.cert:
                cert_file, key_file = tls.cert
                copyfile(
                    cert_file,
                    os.path.join(tls_dir, endpoint, os.path.basename(cert_file)),
                )
                copyfile(
                    key_file,
                    os.path.join(tls_dir, endpoint, os.path.basename(key_file)),
                )

        self.meta_path = get_meta_dir(self.name)
        self.tls_path = get_tls_dir(self.name)

    def remove(self) -> None:
        if os.path.isdir(self.meta_path):
            rmtree(self.meta_path)
        if os.path.isdir(self.tls_path):
            rmtree(self.tls_path)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: '{self.name}'>"

    def __str__(self) -> str:
        return json.dumps(self.__call__(), indent=2)

    def __call__(self) -> dict[str, t.Any]:
        result = self.Metadata
        result.update(self.TLSMaterial)
        result.update(self.Storage)
        return result

    def is_docker_host(self) -> bool:
        return self.context_type is None

    @property
    def Name(self) -> str:  # pylint: disable=invalid-name
        return self.name

    @property
    def Host(self) -> str | None:  # pylint: disable=invalid-name
        if not self.orchestrator or self.orchestrator == "swarm":
            endpoint = self.endpoints.get("docker", None)
            if endpoint:
                return endpoint.get("Host", None)  # type: ignore
            return None

        return self.endpoints[self.orchestrator].get("Host", None)  # type: ignore

    @property
    def Orchestrator(self) -> str | None:  # pylint: disable=invalid-name
        return self.orchestrator

    @property
    def Metadata(self) -> dict[str, t.Any]:  # pylint: disable=invalid-name
        meta: dict[str, t.Any] = {}
        if self.orchestrator:
            meta = {"StackOrchestrator": self.orchestrator}
        return {"Name": self.name, "Metadata": meta, "Endpoints": self.endpoints}

    @property
    def TLSConfig(self) -> TLSConfig | None:  # pylint: disable=invalid-name
        key = self.orchestrator
        if not key or key == "swarm":
            key = "docker"
        if key in self.tls_cfg:
            return self.tls_cfg[key]
        return None

    @property
    def TLSMaterial(self) -> dict[str, t.Any]:  # pylint: disable=invalid-name
        certs: dict[str, t.Any] = {}
        for endpoint, tls in self.tls_cfg.items():
            paths = [tls.ca_cert, *tls.cert] if tls.cert else [tls.ca_cert]
            certs[endpoint] = [
                os.path.basename(path) if path else None for path in paths
            ]
        return {"TLSMaterial": certs}

    @property
    def Storage(self) -> dict[str, t.Any]:  # pylint: disable=invalid-name
        return {"Storage": {"MetadataPath": self.meta_path, "TLSPath": self.tls_path}}
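A round-trip sketch for the class above (context name and host are made up; behavior as read from the methods, not verified against a live setup):

    ctx = Context("ci", host="tcp://10.0.0.5:2376")
    ctx.save()                      # writes Metadata as meta.json under the context's meta dir
    loaded = Context.load_context("ci")
    print(loaded)                   # __str__ dumps Metadata + TLSMaterial + Storage as JSON
    loaded.remove()                 # deletes the context's meta and TLS directories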
18  plugins/module_utils/_api/credentials/constants.py  Normal file
@@ -0,0 +1,18 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


PROGRAM_PREFIX = "docker-credential-"
DEFAULT_LINUX_STORE = "secretservice"
DEFAULT_OSX_STORE = "osxkeychain"
DEFAULT_WIN32_STORE = "wincred"
39  plugins/module_utils/_api/credentials/errors.py  Normal file
@@ -0,0 +1,39 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t


if t.TYPE_CHECKING:
    from subprocess import CalledProcessError


class StoreError(RuntimeError):
    pass


class CredentialsNotFound(StoreError):
    pass


class InitializationError(StoreError):
    pass


def process_store_error(cpe: CalledProcessError, program: str) -> StoreError:
    message = cpe.output.decode("utf-8")
    if "credentials not found in native keychain" in message:
        return CredentialsNotFound(f"No matching credentials in {program}")
    return StoreError(
        f'Credentials store {program} exited with "{cpe.output.decode("utf-8").strip()}".'
    )
102  plugins/module_utils/_api/credentials/store.py  Normal file
@@ -0,0 +1,102 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import errno
import json
import subprocess
import typing as t

from . import constants, errors
from .utils import create_environment_dict, find_executable


class Store:
    def __init__(self, program: str, environment: dict[str, str] | None = None) -> None:
        """Create a store object that acts as an interface to
        perform the basic operations for storing, retrieving
        and erasing credentials using `program`.
        """
        self.program = constants.PROGRAM_PREFIX + program
        self.exe = find_executable(self.program)
        self.environment = environment
        if self.exe is None:
            raise errors.InitializationError(
                f"{self.program} not installed or not available in PATH"
            )

    def get(self, server: str | bytes) -> dict[str, t.Any]:
        """Retrieve credentials for `server`. If no credentials are found,
        a `StoreError` will be raised.
        """
        if not isinstance(server, bytes):
            server = server.encode("utf-8")
        data = self._execute("get", server)
        result = json.loads(data.decode("utf-8"))

        # docker-credential-pass will return an object for inexistent servers
        # whereas other helpers will exit with returncode != 0. For
        # consistency, if no significant data is returned,
        # raise CredentialsNotFound
        if result["Username"] == "" and result["Secret"] == "":
            raise errors.CredentialsNotFound(
                f"No matching credentials in {self.program}"
            )

        return result

    def store(self, server: str, username: str, secret: str) -> bytes:
        """Store credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        data_input = json.dumps(
            {"ServerURL": server, "Username": username, "Secret": secret}
        ).encode("utf-8")
        return self._execute("store", data_input)

    def erase(self, server: str | bytes) -> None:
        """Erase credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        if not isinstance(server, bytes):
            server = server.encode("utf-8")
        self._execute("erase", server)

    def list(self) -> t.Any:
        """List stored credentials. Requires v0.4.0+ of the helper."""
        data = self._execute("list", None)
        return json.loads(data.decode("utf-8"))

    def _execute(self, subcmd: str, data_input: bytes | None) -> bytes:
        if self.exe is None:
            raise errors.StoreError(
                f"{self.program} not installed or not available in PATH"
            )
        output = None
        env = create_environment_dict(self.environment)
        try:
            output = subprocess.check_output(
                [self.exe, subcmd],
                input=data_input,
                env=env,
            )
        except subprocess.CalledProcessError as e:
            raise errors.process_store_error(e, self.program) from e
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise errors.StoreError(
                    f"{self.program} not installed or not available in PATH"
                ) from e
            raise errors.StoreError(
                f'Unexpected OS error "{e.strerror}", errno={e.errno}'
            ) from e
        return output
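A usage sketch for the Store class above, assuming the docker-credential-secretservice helper (the default Linux store from credentials/constants.py) is on PATH; server URL and credentials are made up, and the exact shape of get()'s output depends on the helper:

    store = Store("secretservice")  # resolves docker-credential-secretservice
    store.store("https://registry.example.com", username="user", secret="s3cret")
    store.get("https://registry.example.com")
    # -> {"ServerURL": "https://registry.example.com", "Username": "user", "Secret": "s3cret"}
    store.erase("https://registry.example.com")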
35  plugins/module_utils/_api/credentials/utils.py  Normal file
@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import os
from shutil import which


def find_executable(executable: str, path: str | None = None) -> str | None:
    """
    As distutils.spawn.find_executable, but on Windows, look up
    every extension declared in PATHEXT instead of just `.exe`
    """
    # shutil.which() already uses PATHEXT on Windows, so on
    # Python 3 we can simply use shutil.which() in all cases.
    # (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
    return which(executable, path=path)


def create_environment_dict(overrides: dict[str, str] | None) -> dict[str, str]:
    """
    Create and return a copy of os.environ with the specified overrides
    """
    result = os.environ.copy()
    result.update(overrides or {})
    return result
245  plugins/module_utils/_api/errors.py  Normal file
@@ -0,0 +1,245 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from ansible.module_utils.common.text.converters import to_text

from ._import_helper import HTTPError as _HTTPError


if t.TYPE_CHECKING:
    from requests import Response


class DockerException(Exception):
    """
    A base class from which all other exceptions inherit.

    If you want to catch all errors that the Docker SDK might raise,
    catch this base exception.
    """


def create_api_error_from_http_exception(e: _HTTPError) -> t.NoReturn:
    """
    Create a suitable APIError from requests.exceptions.HTTPError.
    """
    response = e.response
    try:
        explanation = response.json()["message"]
    except ValueError:
        explanation = to_text((response.content or "").strip())
    cls = APIError
    if response.status_code == 404:
        if explanation and (
            "No such image" in str(explanation)
            or "not found: does not exist or no pull access" in str(explanation)
            or "repository does not exist" in str(explanation)
        ):
            cls = ImageNotFound
        else:
            cls = NotFound
    raise cls(e, response=response, explanation=explanation) from e
class APIError(_HTTPError, DockerException):
|
||||
"""
|
||||
An HTTP error from the API.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
message: str | Exception,
|
||||
response: Response | None = None,
|
||||
explanation: str | None = None,
|
||||
) -> None:
|
||||
# requests 1.2 supports response as a keyword argument, but
|
||||
# requests 1.1 does not
|
||||
super().__init__(message)
|
||||
self.response = response
|
||||
self.explanation = explanation or ""
|
||||
|
||||
def __str__(self) -> str:
|
||||
message = super().__str__()
|
||||
|
||||
if self.is_client_error():
|
||||
message = f"{self.response.status_code} Client Error for {self.response.url}: {self.response.reason}"
|
||||
|
||||
elif self.is_server_error():
|
||||
message = f"{self.response.status_code} Server Error for {self.response.url}: {self.response.reason}"
|
||||
|
||||
if self.explanation:
|
||||
message = f'{message} ("{self.explanation}")'
|
||||
|
||||
return message
|
||||
|
||||
@property
|
||||
def status_code(self) -> int | None:
|
||||
if self.response is not None:
|
||||
return self.response.status_code
|
||||
return None
|
||||
|
||||
def is_error(self) -> bool:
|
||||
return self.is_client_error() or self.is_server_error()
|
||||
|
||||
def is_client_error(self) -> bool:
|
||||
if self.status_code is None:
|
||||
return False
|
||||
return 400 <= self.status_code < 500
|
||||
|
||||
def is_server_error(self) -> bool:
|
||||
if self.status_code is None:
|
||||
return False
|
||||
return 500 <= self.status_code < 600
|
||||
|
||||
|
||||
class NotFound(APIError):
|
||||
pass
|
||||
|
||||
|
||||
class ImageNotFound(NotFound):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidVersion(DockerException):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidRepository(DockerException):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidConfigFile(DockerException):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidArgument(DockerException):
|
||||
pass
|
||||
|
||||
|
||||
class DeprecatedMethod(DockerException):
|
||||
pass
|
||||
|
||||
|
||||
class TLSParameterError(DockerException):
|
||||
def __init__(self, msg: str) -> None:
|
||||
self.msg = msg
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.msg + (
|
||||
". TLS configurations should map the Docker CLI "
|
||||
"client configurations. See "
|
||||
"https://docs.docker.com/engine/articles/https/ "
|
||||
"for API details."
|
||||
)
|
||||
|
||||
|
||||
class NullResource(DockerException, ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class ContainerError(DockerException):
|
||||
"""
|
||||
Represents a container that has exited with a non-zero exit code.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
container: str,
|
||||
exit_status: int,
|
||||
command: list[str],
|
||||
image: str,
|
||||
stderr: str | None,
|
||||
):
|
||||
self.container = container
|
||||
self.exit_status = exit_status
|
||||
self.command = command
|
||||
self.image = image
|
||||
self.stderr = stderr
|
||||
|
||||
err = f": {stderr}" if stderr is not None else ""
|
||||
msg = f"Command '{command}' in image '{image}' returned non-zero exit status {exit_status}{err}"
|
||||
|
||||
super().__init__(msg)
|
||||
|
||||
|
||||
class StreamParseError(RuntimeError):
|
||||
def __init__(self, reason: Exception) -> None:
|
||||
self.msg = reason
|
||||
|
||||
|
||||
class BuildError(DockerException):
|
||||
def __init__(self, reason: str, build_log: str) -> None:
|
||||
super().__init__(reason)
|
||||
self.msg = reason
|
||||
self.build_log = build_log
|
||||
|
||||
|
||||
class ImageLoadError(DockerException):
|
||||
pass
|
||||
|
||||
|
||||
def create_unexpected_kwargs_error(name: str, kwargs: dict[str, t.Any]) -> TypeError:
|
||||
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
|
||||
text = [f"{name}() "]
|
||||
if len(quoted_kwargs) == 1:
|
||||
text.append("got an unexpected keyword argument ")
|
||||
else:
|
||||
text.append("got unexpected keyword arguments ")
|
||||
text.append(", ".join(quoted_kwargs))
|
||||
return TypeError("".join(text))
|
||||
|
||||
|
||||
class MissingContextParameter(DockerException):
|
||||
def __init__(self, param: str) -> None:
|
||||
self.param = param
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"missing parameter: {self.param}"
|
||||
|
||||
|
||||
class ContextAlreadyExists(DockerException):
|
||||
def __init__(self, name: str) -> None:
|
||||
self.name = name
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"context {self.name} already exists"
|
||||
|
||||
|
||||
class ContextException(DockerException):
|
||||
def __init__(self, msg: str) -> None:
|
||||
self.msg = msg
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.msg
|
||||
|
||||
|
||||
class ContextNotFound(DockerException):
|
||||
def __init__(self, name: str) -> None:
|
||||
self.name = name
|
||||
|
||||
def __str__(self) -> str:
|
||||
return f"context '{self.name}' not found"
|
||||
|
||||
|
||||
class MissingRequirementException(DockerException):
|
||||
def __init__(
|
||||
self, msg: str, requirement: str, import_exception: ImportError | str
|
||||
) -> None:
|
||||
self.msg = msg
|
||||
self.requirement = requirement
|
||||
self.import_exception = import_exception
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.msg
|
||||
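A sketch of how create_api_error_from_http_exception() is meant to be used at a call site. The daemon URL is a placeholder (it assumes an unauthenticated local API on TCP); the import path follows where this diff places the file:

    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.errors import (
        ImageNotFound,
        create_api_error_from_http_exception,
    )

    resp = requests.get("http://localhost:2375/v1.41/images/no-such-image/json")
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError as exc:
        try:
            create_api_error_from_http_exception(exc)  # always raises
        except ImageNotFound as api_exc:               # 404 + image-related message
            print(api_exc.status_code, api_exc.explanation)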
plugins/module_utils/_api/tls.py  (new file, 108 lines)
@@ -0,0 +1,108 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import os
import typing as t

from . import errors
from .transport.ssladapter import SSLHTTPAdapter


if t.TYPE_CHECKING:
    from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
        APIClient,
    )


class TLSConfig:
    """
    TLS configuration.

    Args:
        client_cert (tuple of str): Path to client cert, path to client key.
        ca_cert (str): Path to CA cert file.
        verify (bool or str): This can be ``False`` or a path to a CA cert
            file.
        assert_hostname (bool): Verify the hostname of the server.

    .. _`SSL version`:
        https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
    """

    cert: tuple[str, str] | None = None
    ca_cert: str | None = None
    verify: bool | None = None

    def __init__(
        self,
        client_cert: tuple[str, str] | None = None,
        ca_cert: str | None = None,
        verify: bool | None = None,
        assert_hostname: bool | None = None,
    ):
        # Argument compatibility/mapping with
        # https://docs.docker.com/engine/articles/https/
        # This diverges from the Docker CLI in that users can specify 'tls'
        # here, but also disable any public/default CA pool verification by
        # leaving verify=False

        self.assert_hostname = assert_hostname

        # "client_cert" must have both or neither cert/key files. In
        # either case, alert the user when both are expected, but any are
        # missing.

        if client_cert:
            try:
                tls_cert, tls_key = client_cert
            except ValueError:
                raise errors.TLSParameterError(
                    "client_cert must be a tuple of (client certificate, key file)"
                ) from None

            if not (tls_cert and tls_key) or (
                not os.path.isfile(tls_cert) or not os.path.isfile(tls_key)
            ):
                raise errors.TLSParameterError(
                    "Path to a certificate and key files must be provided"
                    " through the client_cert param"
                )
            self.cert = (tls_cert, tls_key)

        # If verify is set, make sure the cert exists
        self.verify = verify
        self.ca_cert = ca_cert
        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
            raise errors.TLSParameterError(
                "Invalid CA certificate provided for `ca_cert`."
            )

    def configure_client(self, client: APIClient) -> None:
        """
        Configure a client with these TLS options.
        """

        if self.verify and self.ca_cert:
            client.verify = self.ca_cert
        else:
            client.verify = self.verify

        if self.cert:
            client.cert = self.cert

        client.mount(
            "https://",
            SSLHTTPAdapter(
                assert_hostname=self.assert_hostname,
            ),
        )
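A sketch of the intended call pattern; the certificate paths are placeholders, and client stands for the APIClient object built elsewhere in this diff:

    from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig

    tls_config = TLSConfig(
        client_cert=("/certs/cert.pem", "/certs/key.pem"),
        ca_cert="/certs/ca.pem",
        verify=True,
    )
    # later, inside API client setup:
    # tls_config.configure_client(client)  # sets client.verify/.cert, mounts SSLHTTPAdapter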
plugins/module_utils/_api/transport/basehttpadapter.py  (new file, 35 lines)
@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

from .._import_helper import HTTPAdapter as _HTTPAdapter


class BaseHTTPAdapter(_HTTPAdapter):
    def close(self) -> None:
        # pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
        # pylint: disable-next=no-member
        super().close()
        if hasattr(self, "pools"):
            self.pools.clear()

    # Hotfix for requests 2.32.0 and 2.32.1: its commit
    # https://github.com/psf/requests/commit/c0813a2d910ea6b4f8438b91d315b8d181302356
    # changes requests.adapters.HTTPAdapter to no longer call get_connection() from
    # send(), but instead call _get_connection().
    def _get_connection(self, request, *args, **kwargs):  # type: ignore
        return self.get_connection(request.url, kwargs.get("proxies"))

    # Fix for requests 2.32.2+:
    # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):  # type: ignore
        return self.get_connection(request.url, proxies)
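The net effect of the two shims is that a subclass only has to implement get_connection(); whichever hook the installed requests version actually calls ends up routed there. A hypothetical stub, purely to illustrate the contract (not part of the collection):

    class LoggingAdapter(BaseHTTPAdapter):
        def get_connection(self, url, proxies=None):  # the single routing point
            print(f"requests routed {url} here")
            return super().get_connection(url, proxies)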
plugins/module_utils/_api/transport/npipeconn.py  (new file, 124 lines)
@@ -0,0 +1,124 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t
from queue import Empty

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket


if t.TYPE_CHECKING:
    from collections.abc import Mapping

    from requests import PreparedRequest


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class NpipeHTTPConnection(urllib3_connection.HTTPConnection):
    def __init__(self, npipe_path: str, timeout: int | float = 60) -> None:
        super().__init__("localhost", timeout=timeout)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self) -> None:
        sock = NpipeSocket()
        sock.settimeout(self.timeout)
        sock.connect(self.npipe_path)
        self.sock = sock


class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(
        self, npipe_path: str, timeout: int | float = 60, maxsize: int = 10
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self) -> NpipeHTTPConnection:
        return NpipeHTTPConnection(self.npipe_path, self.timeout)

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout: int | float) -> NpipeHTTPConnection:
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError as exc:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc

        except Empty as exc:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                ) from exc
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class NpipeHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "npipe_path",
        "pools",
        "timeout",
        "max_pool_size",
    ]

    def __init__(
        self,
        base_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
    ) -> None:
        self.npipe_path = base_url.replace("npipe://", "")
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> NpipeHTTPConnectionPool:
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout, maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(
        self, request: PreparedRequest, proxies: Mapping[str, str] | None
    ) -> str:
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
        return request.path_url
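On Windows this adapter is mounted on a requests session so that plain HTTP traffic travels over the daemon's named pipe. A sketch; the http+docker:// mount prefix and the pipe path follow the upstream SDK's conventions and are assumptions here:

    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.transport.npipeconn import (
        NpipeHTTPAdapter,
    )

    session = requests.Session()
    session.mount("http+docker://", NpipeHTTPAdapter("npipe:////./pipe/docker_engine"))
    # resp = session.get("http+docker://localnpipe/_ping")  # illustrative request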
plugins/module_utils/_api/transport/npipesocket.py  (new file, 278 lines)
@@ -0,0 +1,278 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import functools
import io
import time
import traceback
import typing as t


PYWIN32_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    import pywintypes
    import win32api
    import win32event
    import win32file
    import win32pipe
except ImportError:
    PYWIN32_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name
else:
    PYWIN32_IMPORT_ERROR = None  # pylint: disable=invalid-name

if t.TYPE_CHECKING:
    from collections.abc import Buffer, Callable

    _Self = t.TypeVar("_Self")
    _P = t.ParamSpec("_P")
    _R = t.TypeVar("_R")


ERROR_PIPE_BUSY = 0xE7
SECURITY_SQOS_PRESENT = 0x100000
SECURITY_ANONYMOUS = 0

MAXIMUM_RETRY_COUNT = 10


def check_closed(
    f: Callable[t.Concatenate[_Self, _P], _R],
) -> Callable[t.Concatenate[_Self, _P], _R]:
    @functools.wraps(f)
    def wrapped(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
        if self._closed:  # type: ignore
            raise RuntimeError("Can not reuse socket after connection was closed.")
        return f(self, *args, **kwargs)

    return wrapped


class NpipeSocket:
    """Partial implementation of the socket API over windows named pipes.
    This implementation is only designed to be used as a client socket,
    and server-specific methods (bind, listen, accept...) are not
    implemented.
    """

    def __init__(self, handle: t.Any | None = None) -> None:
        self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
        self._handle = handle
        self._address: str | None = None
        self._closed = False
        self.flags: int | None = None

    def accept(self) -> t.NoReturn:
        raise NotImplementedError()

    def bind(self, address: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def close(self) -> None:
        if self._handle is None:
            raise ValueError("Handle not present")
        self._handle.Close()
        self._closed = True

    @check_closed
    def connect(self, address: str, retry_count: int = 0) -> None:
        try:
            handle = win32file.CreateFile(
                address,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                0,
                None,
                win32file.OPEN_EXISTING,
                (
                    SECURITY_ANONYMOUS
                    | SECURITY_SQOS_PRESENT
                    | win32file.FILE_FLAG_OVERLAPPED
                ),
                0,
            )
        except win32pipe.error as e:
            # See Remarks:
            # https://msdn.microsoft.com/en-us/library/aa365800.aspx
            if e.winerror == ERROR_PIPE_BUSY:
                # Another program or thread has grabbed our pipe instance
                # before we got to it. Wait for availability and attempt to
                # connect again.
                retry_count = retry_count + 1
                if retry_count < MAXIMUM_RETRY_COUNT:
                    time.sleep(1)
                    return self.connect(address, retry_count)
            raise e

        self.flags = win32pipe.GetNamedPipeInfo(handle)[0]  # type: ignore

        self._handle = handle
        self._address = address

    @check_closed
    def connect_ex(self, address: str) -> None:
        self.connect(address)

    @check_closed
    def detach(self) -> t.Any:
        self._closed = True
        return self._handle

    @check_closed
    def dup(self) -> NpipeSocket:
        return NpipeSocket(self._handle)

    def getpeername(self) -> str | None:
        return self._address

    def getsockname(self) -> str | None:
        return self._address

    def getsockopt(
        self, level: t.Any, optname: t.Any, buflen: t.Any = None
    ) -> t.NoReturn:
        raise NotImplementedError()

    def ioctl(self, control: t.Any, option: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def listen(self, backlog: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def makefile(self, mode: str, bufsize: int | None = None) -> t.IO[bytes]:
        if mode.strip("b") != "r":
            raise NotImplementedError()
        rawio = NpipeFileIOBase(self)
        if bufsize is None or bufsize <= 0:
            bufsize = io.DEFAULT_BUFFER_SIZE
        return io.BufferedReader(rawio, buffer_size=bufsize)

    @check_closed
    def recv(self, bufsize: int, flags: int = 0) -> str:
        if self._handle is None:
            raise ValueError("Handle not present")
        dummy_err, data = win32file.ReadFile(self._handle, bufsize)
        return data

    @check_closed
    def recvfrom(self, bufsize: int, flags: int = 0) -> tuple[str, str | None]:
        data = self.recv(bufsize, flags)
        return (data, self._address)

    @check_closed
    def recvfrom_into(
        self, buf: Buffer, nbytes: int = 0, flags: int = 0
    ) -> tuple[int, str | None]:
        return self.recv_into(buf, nbytes), self._address

    @check_closed
    def recv_into(self, buf: Buffer, nbytes: int = 0) -> int:
        if self._handle is None:
            raise ValueError("Handle not present")
        readbuf = buf if isinstance(buf, memoryview) else memoryview(buf)

        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            dummy_err, dummy_data = win32file.ReadFile(  # type: ignore
                self._handle, readbuf[:nbytes] if nbytes else readbuf, overlapped
            )
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def send(self, string: Buffer, flags: int = 0) -> int:
        if self._handle is None:
            raise ValueError("Handle not present")
        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            win32file.WriteFile(self._handle, string, overlapped)  # type: ignore
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def sendall(self, string: Buffer, flags: int = 0) -> int:
        return self.send(string, flags)

    @check_closed
    def sendto(self, string: Buffer, address: str) -> int:
        self.connect(address)
        return self.send(string)

    def setblocking(self, flag: bool) -> None:
        if flag:
            return self.settimeout(None)
        return self.settimeout(0)

    def settimeout(self, value: int | float | None) -> None:
        if value is None:
            # Blocking mode
            self._timeout = win32event.INFINITE
        elif not isinstance(value, (float, int)) or value < 0:
            raise ValueError("Timeout value out of range")
        else:
            # Timeout mode - Value converted to milliseconds
            self._timeout = int(value * 1000)

    def gettimeout(self) -> int | float | None:
        return self._timeout

    def setsockopt(self, level: t.Any, optname: t.Any, value: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    @check_closed
    def shutdown(self, how: t.Any) -> None:
        return self.close()


class NpipeFileIOBase(io.RawIOBase):
    def __init__(self, npipe_socket: NpipeSocket | None) -> None:
        self.sock = npipe_socket

    def close(self) -> None:
        super().close()
        self.sock = None

    def fileno(self) -> int:
        if self.sock is None:
            raise RuntimeError("socket is closed")
        # TODO: This is definitely a bug, NpipeSocket.fileno() does not exist!
        return self.sock.fileno()  # type: ignore

    def isatty(self) -> bool:
        return False

    def readable(self) -> bool:
        return True

    def readinto(self, buf: Buffer) -> int:
        if self.sock is None:
            raise RuntimeError("socket is closed")
        return self.sock.recv_into(buf)

    def seekable(self) -> bool:
        return False

    def writable(self) -> bool:
        return False
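A minimal, Windows-only sketch of driving the socket directly (requires pywin32). The pipe name is Docker Desktop's default, and the hand-written HTTP request is purely illustrative; in practice the npipe adapter above does this for you:

    from ansible_collections.community.docker.plugins.module_utils._api.transport.npipesocket import (
        NpipeSocket,
    )

    sock = NpipeSocket()
    sock.settimeout(30)
    sock.connect(r"\\.\pipe\docker_engine")
    sock.sendall(b"GET /_ping HTTP/1.1\r\nHost: localhost\r\n\r\n")
    print(sock.recv(4096))  # raw HTTP response bytes from the daemon
    sock.close()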
plugins/module_utils/_api/transport/sshconn.py  (new file, 312 lines)
@@ -0,0 +1,312 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import logging
import os
import signal
import socket
import subprocess
import traceback
import typing as t
from queue import Empty
from urllib.parse import urlparse

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter


PARAMIKO_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    import paramiko
except ImportError:
    PARAMIKO_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name
else:
    PARAMIKO_IMPORT_ERROR = None  # pylint: disable=invalid-name

if t.TYPE_CHECKING:
    from collections.abc import Buffer, Mapping


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class SSHSocket(socket.socket):
    def __init__(self, host: str) -> None:
        super().__init__(socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = None
        self.user = None
        if ":" in self.host:
            self.host, self.port = self.host.split(":")
        if "@" in self.host:
            self.user, self.host = self.host.split("@")

        self.proc: subprocess.Popen | None = None

    def connect(self, *args_: t.Any, **kwargs: t.Any) -> None:
        args = ["ssh"]
        if self.user:
            args = args + ["-l", self.user]

        if self.port:
            args = args + ["-p", self.port]

        args = args + ["--", self.host, "docker system dial-stdio"]

        preexec_func = None
        if not constants.IS_WINDOWS_PLATFORM:

            def f() -> None:
                signal.signal(signal.SIGINT, signal.SIG_IGN)

            preexec_func = f

        env = dict(os.environ)

        # drop LD_LIBRARY_PATH and SSL_CERT_FILE
        env.pop("LD_LIBRARY_PATH", None)
        env.pop("SSL_CERT_FILE", None)

        self.proc = subprocess.Popen(  # pylint: disable=consider-using-with
            args,
            env=env,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            preexec_fn=preexec_func,
        )

    def _write(self, data: Buffer) -> int:
        if not self.proc:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first."
            )
        assert self.proc.stdin is not None
        if self.proc.stdin.closed:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first after close()."
            )
        written = self.proc.stdin.write(data)
        self.proc.stdin.flush()
        return written

    def sendall(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> None:
        self._write(data)

    def send(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> int:
        return self._write(data)

    def recv(self, n: int, *args: t.Any, **kwargs: t.Any) -> bytes:
        if not self.proc:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first."
            )
        assert self.proc.stdout is not None
        return self.proc.stdout.read(n)

    def makefile(self, mode: str, *args: t.Any, **kwargs: t.Any) -> t.IO:  # type: ignore
        if not self.proc:
            self.connect()
        assert self.proc is not None
        assert self.proc.stdout is not None
        self.proc.stdout.channel = self  # type: ignore

        return self.proc.stdout

    def close(self) -> None:
        if not self.proc:
            return
        assert self.proc.stdin is not None
        if self.proc.stdin.closed:
            return
        self.proc.stdin.write(b"\n\n")
        self.proc.stdin.flush()
        self.proc.terminate()


class SSHConnection(urllib3_connection.HTTPConnection):
    def __init__(
        self,
        *,
        ssh_transport: paramiko.Transport | None = None,
        timeout: int | float = 60,
        host: str,
    ) -> None:
        super().__init__("localhost", timeout=timeout)
        self.ssh_transport = ssh_transport
        self.timeout = timeout
        self.ssh_host = host
        self.sock: paramiko.Channel | SSHSocket | None = None

    def connect(self) -> None:
        if self.ssh_transport:
            channel = self.ssh_transport.open_session()
            channel.settimeout(self.timeout)
            channel.exec_command("docker system dial-stdio")
            self.sock = channel
        else:
            sock = SSHSocket(self.ssh_host)
            sock.settimeout(self.timeout)
            sock.connect()
            self.sock = sock


class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    scheme = "ssh"

    def __init__(
        self,
        *,
        ssh_client: paramiko.SSHClient | None = None,
        timeout: int | float = 60,
        maxsize: int = 10,
        host: str,
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.ssh_transport: paramiko.Transport | None = None
        self.timeout = timeout
        if ssh_client:
            self.ssh_transport = ssh_client.get_transport()
        self.ssh_host = host

    def _new_conn(self) -> SSHConnection:
        return SSHConnection(
            ssh_transport=self.ssh_transport,
            timeout=self.timeout,
            host=self.ssh_host,
        )

    # When re-using connections, urllib3 calls fileno() on our
    # SSH channel instance, quickly overloading our fd limit. To avoid this,
    # we override _get_conn
    def _get_conn(self, timeout: int | float) -> SSHConnection:
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError as exc:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc

        except Empty as exc:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                ) from exc
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class SSHHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "pools",
        "timeout",
        "ssh_client",
        "ssh_params",
        "max_pool_size",
    ]

    def __init__(
        self,
        base_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
        shell_out: bool = False,
    ) -> None:
        self.ssh_client: paramiko.SSHClient | None = None
        if not shell_out:
            self._create_paramiko_client(base_url)
            self._connect()

        self.ssh_host = base_url
        if base_url.startswith("ssh://"):
            self.ssh_host = base_url[len("ssh://") :]

        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def _create_paramiko_client(self, base_url: str) -> None:
        logging.getLogger("paramiko").setLevel(logging.WARNING)
        self.ssh_client = paramiko.SSHClient()
        base_url_p = urlparse(base_url)
        assert base_url_p.hostname is not None
        self.ssh_params: dict[str, t.Any] = {
            "hostname": base_url_p.hostname,
            "port": base_url_p.port,
            "username": base_url_p.username,
        }
        ssh_config_file = os.path.expanduser("~/.ssh/config")
        if os.path.exists(ssh_config_file):
            conf = paramiko.SSHConfig()
            with open(ssh_config_file, "rt", encoding="utf-8") as f:
                conf.parse(f)
            host_config = conf.lookup(base_url_p.hostname)
            if "proxycommand" in host_config:
                self.ssh_params["sock"] = paramiko.ProxyCommand(
                    host_config["proxycommand"]
                )
            if "hostname" in host_config:
                self.ssh_params["hostname"] = host_config["hostname"]
            if base_url_p.port is None and "port" in host_config:
                self.ssh_params["port"] = host_config["port"]
            if base_url_p.username is None and "user" in host_config:
                self.ssh_params["username"] = host_config["user"]
            if "identityfile" in host_config:
                self.ssh_params["key_filename"] = host_config["identityfile"]

        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())

    def _connect(self) -> None:
        if self.ssh_client:
            self.ssh_client.connect(**self.ssh_params)

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> SSHConnectionPool:
        if not self.ssh_client:
            return SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host,
            )
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            # Connection is closed; try a reconnect
            if self.ssh_client and not self.ssh_client.get_transport():
                self._connect()

            pool = SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host,
            )
            self.pools[url] = pool

        return pool

    def close(self) -> None:
        super().close()
        if self.ssh_client:
            self.ssh_client.close()
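A sketch of the adapter's two modes: shell_out=True forks the local ssh binary per connection (no paramiko required), otherwise a paramiko transport is opened up front. Either way the remote end runs "docker system dial-stdio". The host name and mount prefix are placeholders:

    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.transport.sshconn import (
        SSHHTTPAdapter,
    )

    adapter = SSHHTTPAdapter("ssh://core@build-host", shell_out=True)
    session = requests.Session()
    session.mount("http+docker://", adapter)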
plugins/module_utils/_api/transport/ssladapter.py  (new file, 72 lines)
@@ -0,0 +1,72 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter


# Resolves OpenSSL issues in some servers:
# https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
# https://github.com/kennethreitz/requests/pull/799


PoolManager = urllib3.poolmanager.PoolManager


class SSLHTTPAdapter(BaseHTTPAdapter):
    """An HTTPS Transport Adapter that uses an arbitrary SSL version."""

    __attrs__ = HTTPAdapter.__attrs__ + ["assert_hostname"]

    def __init__(
        self,
        assert_hostname: bool | None = None,
        **kwargs: t.Any,
    ) -> None:
        self.assert_hostname = assert_hostname
        super().__init__(**kwargs)

    def init_poolmanager(
        self, connections: int, maxsize: int, block: bool = False, **kwargs: t.Any
    ) -> None:
        kwargs = {
            "num_pools": connections,
            "maxsize": maxsize,
            "block": block,
        }
        if self.assert_hostname is not None:
            kwargs["assert_hostname"] = self.assert_hostname

        self.poolmanager = PoolManager(**kwargs)

    def get_connection(self, *args: t.Any, **kwargs: t.Any) -> urllib3.ConnectionPool:
        """
        Ensure assert_hostname is set correctly on our pool.

        We already take care of a normal poolmanager via init_poolmanager,
        but we still need to take care of when there is a proxy poolmanager.

        Note that this method is no longer called for newer requests versions.
        """
        # pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
        # pylint: disable-next=no-member
        conn = super().get_connection(*args, **kwargs)
        if (
            self.assert_hostname is not None
            and conn.assert_hostname != self.assert_hostname  # type: ignore
        ):
            conn.assert_hostname = self.assert_hostname  # type: ignore
        return conn
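This is the adapter TLSConfig.configure_client() mounts. Used standalone it looks like this; a sketch only, disabling hostname assertion purely to make the effect visible (do not do this against untrusted servers):

    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.transport.ssladapter import (
        SSLHTTPAdapter,
    )

    session = requests.Session()
    session.mount("https://", SSLHTTPAdapter(assert_hostname=False))
    # every urllib3 pool this session creates for https:// now carries assert_hostname=False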
plugins/module_utils/_api/transport/unixconn.py  (new file, 127 lines)
@@ -0,0 +1,127 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import socket
import typing as t

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter


if t.TYPE_CHECKING:
    from collections.abc import Mapping

    from requests import PreparedRequest

    from ..._socket_helper import SocketLike


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class UnixHTTPConnection(urllib3_connection.HTTPConnection):
    def __init__(
        self, base_url: str | bytes, unix_socket: str, timeout: int | float = 60
    ) -> None:
        super().__init__("localhost", timeout=timeout)
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout
        self.disable_buffering = False

    def connect(self) -> None:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.unix_socket)
        self.sock = sock

    def putheader(self, header: str, *values: str) -> None:
        super().putheader(header, *values)
        if header == "Connection" and "Upgrade" in values:
            self.disable_buffering = True

    def response_class(self, sock: SocketLike, *args: t.Any, **kwargs: t.Any) -> t.Any:
        # FIXME: We may need to disable buffering on Py3,
        # but there's no clear way to do it at the moment. See:
        # https://github.com/docker/docker-py/issues/1799
        return super().response_class(sock, *args, **kwargs)


class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(
        self,
        base_url: str | bytes,
        socket_path: str,
        timeout: int | float = 60,
        maxsize: int = 10,
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self) -> UnixHTTPConnection:
        return UnixHTTPConnection(self.base_url, self.socket_path, self.timeout)


class UnixHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "pools",
        "socket_path",
        "timeout",
        "max_pool_size",
    ]

    def __init__(
        self,
        socket_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
    ) -> None:
        socket_path = socket_url.replace("http+unix://", "")
        if not socket_path.startswith("/"):
            socket_path = "/" + socket_path
        self.socket_path = socket_path
        self.timeout = timeout
        self.max_pool_size = max_pool_size

        def f(p: t.Any) -> None:
            p.close()

        self.pools = RecentlyUsedContainer(pool_connections, dispose_func=f)
        super().__init__()

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> UnixHTTPConnectionPool:
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout, maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request: PreparedRequest, proxies: Mapping[str, str]) -> str:
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
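The Unix-socket twin of the npipe adapter. A sketch using the stock daemon socket path; the http+docker:// mount prefix follows the upstream SDK's convention and is an assumption here:

    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.transport.unixconn import (
        UnixHTTPAdapter,
    )

    session = requests.Session()
    session.mount("http+docker://", UnixHTTPAdapter("http+unix:///var/run/docker.sock"))
    # resp = session.get("http+docker://localhost/_ping")  # illustrative request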
plugins/module_utils/_api/types/daemon.py  (new file, 91 lines)
@@ -0,0 +1,91 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import socket
import typing as t

from .._import_helper import urllib3
from ..errors import DockerException


if t.TYPE_CHECKING:
    from requests import Response

    _T = t.TypeVar("_T")


class CancellableStream(t.Generic[_T]):
    """
    Stream wrapper for real-time events, logs, etc. from the server.

    Example:
        >>> events = client.events()
        >>> for event in events:
        ...     print(event)
        >>> # and cancel from another thread
        >>> events.close()
    """

    def __init__(self, stream: t.Generator[_T], response: Response) -> None:
        self._stream = stream
        self._response = response

    def __iter__(self) -> t.Self:
        return self

    def __next__(self) -> _T:
        try:
            return next(self._stream)
        except urllib3.exceptions.ProtocolError as exc:
            raise StopIteration from exc
        except socket.error as exc:
            raise StopIteration from exc

    next = __next__

    def close(self) -> None:
        """
        Closes the event streaming.
        """

        if not self._response.raw.closed:
            # find the underlying socket object
            # based on api.client._get_raw_response_socket

            sock_fp = self._response.raw._fp.fp  # type: ignore

            if hasattr(sock_fp, "raw"):
                sock_raw = sock_fp.raw

                if hasattr(sock_raw, "sock"):
                    sock = sock_raw.sock

                elif hasattr(sock_raw, "_sock"):
                    sock = sock_raw._sock

            elif hasattr(sock_fp, "channel"):
                # We are working with a paramiko (SSH) channel, which does not
                # support cancelable streams with the current implementation
                raise DockerException(
                    "Cancellable streams not supported for the SSH protocol"
                )
            else:
                sock = sock_fp._sock  # type: ignore

            if hasattr(urllib3.contrib, "pyopenssl") and isinstance(
                sock, urllib3.contrib.pyopenssl.WrappedSocket
            ):
                sock = sock.socket

            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
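A sketch of wrapping a streamed response by hand; normally the API client does this for you. resp stands for a requests.Response obtained with stream=True, and iter_lines() is requests' real line generator:

    stream = CancellableStream(resp.iter_lines(), resp)
    for line in stream:
        print(line)  # another thread may call stream.close() to abort the loop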
311
plugins/module_utils/_api/utils/build.py
Normal file
311
plugins/module_utils/_api/utils/build.py
Normal file
@ -0,0 +1,311 @@
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import tarfile
|
||||
import tempfile
|
||||
import typing as t
|
||||
|
||||
from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
|
||||
from . import fnmatch
|
||||
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from collections.abc import Sequence
|
||||
|
||||
|
||||
_SEP = re.compile("/|\\\\") if IS_WINDOWS_PLATFORM else re.compile("/")
|
||||
|
||||
|
||||
def tar(
|
||||
path: str,
|
||||
exclude: list[str] | None = None,
|
||||
dockerfile: tuple[str, str | None] | tuple[None, None] | None = None,
|
||||
fileobj: t.IO[bytes] | None = None,
|
||||
gzip: bool = False,
|
||||
) -> t.IO[bytes]:
|
||||
root = os.path.abspath(path)
|
||||
exclude = exclude or []
|
||||
dockerfile = dockerfile or (None, None)
|
||||
extra_files: list[tuple[str, str]] = []
|
||||
if dockerfile[1] is not None:
|
||||
assert dockerfile[0] is not None
|
||||
dockerignore_contents = "\n".join(
|
||||
(exclude or [".dockerignore"]) + [dockerfile[0]]
|
||||
)
|
||||
extra_files = [
|
||||
(".dockerignore", dockerignore_contents),
|
||||
dockerfile, # type: ignore
|
||||
]
|
||||
return create_archive(
|
||||
files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
|
||||
root=root,
|
||||
fileobj=fileobj,
|
||||
gzip=gzip,
|
||||
extra_files=extra_files,
|
||||
)
|
||||
|
||||
|
||||
def exclude_paths(
|
||||
root: str, patterns: list[str], dockerfile: str | None = None
|
||||
) -> set[str]:
|
||||
"""
|
||||
Given a root directory path and a list of .dockerignore patterns, return
|
||||
an iterator of all paths (both regular files and directories) in the root
|
||||
directory that do *not* match any of the patterns.
|
||||
|
||||
All paths returned are relative to the root.
|
||||
"""
|
||||
|
||||
if dockerfile is None:
|
||||
dockerfile = "Dockerfile"
|
||||
|
||||
patterns.append("!" + dockerfile)
|
||||
pm = PatternMatcher(patterns)
|
||||
return set(pm.walk(root))
|
||||
|
||||
|
||||
def build_file_list(root: str) -> list[str]:
|
||||
files = []
|
||||
for dirname, dirnames, fnames in os.walk(root):
|
||||
for filename in fnames + dirnames:
|
||||
longpath = os.path.join(dirname, filename)
|
||||
files.append(longpath.replace(root, "", 1).lstrip("/"))
|
||||
|
||||
return files
|
||||
|
||||
|
||||
def create_archive(
|
||||
root: str,
|
||||
files: Sequence[str] | None = None,
|
||||
fileobj: t.IO[bytes] | None = None,
|
||||
gzip: bool = False,
|
||||
extra_files: Sequence[tuple[str, str]] | None = None,
|
||||
) -> t.IO[bytes]:
|
||||
extra_files = extra_files or []
|
||||
if not fileobj:
|
||||
# pylint: disable-next=consider-using-with
|
||||
fileobj = tempfile.NamedTemporaryFile() # noqa: SIM115
|
||||
|
||||
with tarfile.open(mode="w:gz" if gzip else "w", fileobj=fileobj) as tarf:
|
||||
if files is None:
|
||||
files = build_file_list(root)
|
||||
extra_names = set(e[0] for e in extra_files)
|
||||
for path in files:
|
||||
if path in extra_names:
|
||||
# Extra files override context files with the same name
|
||||
continue
|
||||
full_path = os.path.join(root, path)
|
||||
|
||||
i = tarf.gettarinfo(full_path, arcname=path)
|
||||
if i is None:
|
||||
# This happens when we encounter a socket file. We can safely
|
||||
# ignore it and proceed.
|
||||
continue # type: ignore
|
||||
|
||||
# Workaround https://bugs.python.org/issue32713
|
||||
if i.mtime < 0 or i.mtime > 8**11 - 1:
|
||||
i.mtime = int(i.mtime)
|
||||
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
# Windows does not keep track of the execute bit, so we make files
|
||||
# and directories executable by default.
|
||||
i.mode = i.mode & 0o755 | 0o111
|
||||
|
||||
if i.isfile():
|
||||
try:
|
||||
with open(full_path, "rb") as f:
|
||||
tarf.addfile(i, f)
|
||||
except IOError as exc:
|
||||
raise IOError(f"Can not read file in context: {full_path}") from exc
|
||||
else:
|
||||
# Directories, FIFOs, symlinks... do not need to be read.
|
||||
tarf.addfile(i, None)
|
||||
|
||||
for name, contents in extra_files:
|
||||
info = tarfile.TarInfo(name)
|
||||
contents_encoded = contents.encode("utf-8")
|
||||
info.size = len(contents_encoded)
|
||||
tarf.addfile(info, io.BytesIO(contents_encoded))
|
||||
|
||||
fileobj.seek(0)
|
||||
return fileobj
|
||||
|
||||
|
||||
def mkbuildcontext(dockerfile: io.BytesIO | t.IO[bytes]) -> t.IO[bytes]:
|
    # pylint: disable-next=consider-using-with
    f = tempfile.NamedTemporaryFile()  # noqa: SIM115
    try:
        with tarfile.open(mode="w", fileobj=f) as tarf:
            if isinstance(dockerfile, io.StringIO):  # type: ignore
                raise TypeError("Please use io.BytesIO to create in-memory Dockerfiles")
            if isinstance(dockerfile, io.BytesIO):
                dfinfo = tarfile.TarInfo("Dockerfile")
                dfinfo.size = len(dockerfile.getvalue())
                dockerfile.seek(0)
            else:
                dfinfo = tarf.gettarinfo(fileobj=dockerfile, arcname="Dockerfile")
            tarf.addfile(dfinfo, dockerfile)
        f.seek(0)
    except Exception:  # noqa: E722
        f.close()
        raise
    return f


def split_path(p: str) -> list[str]:
    return [pt for pt in re.split(_SEP, p) if pt and pt != "."]


def normalize_slashes(p: str) -> str:
    if IS_WINDOWS_PLATFORM:
        return "/".join(split_path(p))
    return p


def walk(root: str, patterns: Sequence[str], default: bool = True) -> t.Generator[str]:
    pm = PatternMatcher(patterns)
    return pm.walk(root)


# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher:
    def __init__(self, patterns: Sequence[str]) -> None:
        self.patterns = list(filter(lambda p: p.dirs, [Pattern(p) for p in patterns]))
        self.patterns.append(Pattern("!.dockerignore"))

    def matches(self, filepath: str) -> bool:
        matched = False
        parent_path = os.path.dirname(filepath)
        parent_path_dirs = split_path(parent_path)

        for pattern in self.patterns:
            negative = pattern.exclusion
            match = pattern.match(filepath)
            if (
                not match
                and parent_path != ""
                and len(pattern.dirs) <= len(parent_path_dirs)
            ):
                match = pattern.match(
                    os.path.sep.join(parent_path_dirs[: len(pattern.dirs)])
                )

            if match:
                matched = not negative

        return matched

    def walk(self, root: str) -> t.Generator[str]:
        def rec_walk(current_dir: str) -> t.Generator[str]:
            for f in os.listdir(current_dir):
                fpath = os.path.join(os.path.relpath(current_dir, root), f)
                if fpath.startswith("." + os.path.sep):
                    fpath = fpath[2:]
                match = self.matches(fpath)
                if not match:
                    yield fpath

                cur = os.path.join(root, fpath)
                if not os.path.isdir(cur) or os.path.islink(cur):
                    continue

                if match:
                    # If we want to skip this file and it is a directory
                    # then we should first check to see if there's an
                    # excludes pattern (e.g. !dir/file) that starts with this
                    # dir. If so then we cannot skip this dir.
                    skip = True

                    for pat in self.patterns:
                        if not pat.exclusion:
                            continue
                        if pat.cleaned_pattern.startswith(normalize_slashes(fpath)):
                            skip = False
                            break
                    if skip:
                        continue
                yield from rec_walk(cur)

        return rec_walk(root)


class Pattern:
    def __init__(self, pattern_str: str) -> None:
        self.exclusion = False
        if pattern_str.startswith("!"):
            self.exclusion = True
            pattern_str = pattern_str[1:]

        self.dirs = self.normalize(pattern_str)
        self.cleaned_pattern = "/".join(self.dirs)

    @classmethod
    def normalize(cls, p: str) -> list[str]:
        # Remove trailing spaces
        p = p.strip()

        # Leading and trailing slashes are not relevant. Yes,
        # "foo.py/" must exclude the "foo.py" regular file. "."
        # components are not relevant either, even if the whole
        # pattern is only ".", as the Docker reference states: "For
        # historical reasons, the pattern . is ignored."
        # ".." component must be cleared with the potential previous
        # component, regardless of whether it exists: "A preprocessing
        # step [...] eliminates . and .. elements using Go's
        # filepath.".
        i = 0
        split = split_path(p)
        while i < len(split):
            if split[i] == "..":
                del split[i]
                if i > 0:
                    del split[i - 1]
                    i -= 1
            else:
                i += 1
        return split

    def match(self, filepath: str) -> bool:
        return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)


def process_dockerfile(
    dockerfile: str | None, path: str
) -> tuple[str, str | None] | tuple[None, None]:
    if not dockerfile:
        return (None, None)

    abs_dockerfile = dockerfile
    if not os.path.isabs(dockerfile):
        abs_dockerfile = os.path.join(path, dockerfile)
        if IS_WINDOWS_PLATFORM and path.startswith(WINDOWS_LONGPATH_PREFIX):
            abs_dockerfile = f"{WINDOWS_LONGPATH_PREFIX}{os.path.normpath(abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX) :])}"
    if os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[
        0
    ] or os.path.relpath(abs_dockerfile, path).startswith(".."):
        # Dockerfile not in context - read data to insert into tar later
        with open(abs_dockerfile, "rt", encoding="utf-8") as df:
            return (f".dockerfile.{random.getrandbits(160):x}", df.read())

    # Dockerfile is inside the context - return path relative to context root
    if dockerfile == abs_dockerfile:
        # Only calculate relpath if necessary to avoid errors
        # on Windows client -> Linux Docker
        # see https://github.com/docker/compose/issues/5969
        dockerfile = os.path.relpath(abs_dockerfile, path)
    return (dockerfile, None)
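A quick illustration of the .dockerignore semantics implemented by Pattern/PatternMatcher above: later patterns win, and "!" exclusions can re-include files a previous pattern dropped. This sketch is hypothetical — the pattern list and paths are invented, and the output assumes a POSIX host:

    # Hypothetical illustration of PatternMatcher.matches(); paths are invented.
    pm = PatternMatcher(["docs", "!docs/README.md"])

    print(pm.matches("docs/conf.py"))    # True  -> excluded from the build context
    print(pm.matches("docs/README.md"))  # False -> re-included by the "!" pattern
    print(pm.matches("src/main.py"))     # False -> no pattern matches

Note that "docs/conf.py" matches via its parent directory: the "docs" pattern does not match the file path directly, so matches() also tests the leading path components.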
plugins/module_utils/_api/utils/config.py (new file, 90 lines)
@@ -0,0 +1,90 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import logging
import os
import typing as t

from ..constants import IS_WINDOWS_PLATFORM


DOCKER_CONFIG_FILENAME = os.path.join(".docker", "config.json")
LEGACY_DOCKER_CONFIG_FILENAME = ".dockercfg"

log = logging.getLogger(__name__)


def get_default_config_file() -> str:
    return os.path.join(home_dir(), DOCKER_CONFIG_FILENAME)


def find_config_file(config_path: str | None = None) -> str | None:
    homedir = home_dir()
    paths = list(
        filter(
            None,
            [
                config_path,  # 1
                config_path_from_environment(),  # 2
                os.path.join(homedir, DOCKER_CONFIG_FILENAME),  # 3
                os.path.join(homedir, LEGACY_DOCKER_CONFIG_FILENAME),  # 4
            ],
        )
    )

    log.debug("Trying paths: %s", repr(paths))

    for path in paths:
        if os.path.exists(path):
            log.debug("Found file at path: %s", path)
            return path

    log.debug("No config file found")

    return None


def config_path_from_environment() -> str | None:
    config_dir = os.environ.get("DOCKER_CONFIG")
    if not config_dir:
        return None
    return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))


def home_dir() -> str:
    """
    Get the user's home directory, using the same logic as the Docker Engine
    client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
    """
    if IS_WINDOWS_PLATFORM:
        return os.environ.get("USERPROFILE", "")
    return os.path.expanduser("~")


def load_general_config(config_path: str | None = None) -> dict[str, t.Any]:
    config_file = find_config_file(config_path)

    if not config_file:
        return {}

    try:
        with open(config_file, "rt", encoding="utf-8") as f:
            return json.load(f)
    except (IOError, ValueError) as e:
        # In the case of a legacy `.dockercfg` file, we will not
        # be able to load any JSON data.
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}
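find_config_file() tries its candidates in a fixed precedence: an explicit path, then $DOCKER_CONFIG, then ~/.docker/config.json, and finally the legacy ~/.dockercfg. A minimal usage sketch — the directory name is invented for illustration:

    # Hypothetical usage; /tmp/docker-conf is an invented path.
    import os

    os.environ["DOCKER_CONFIG"] = "/tmp/docker-conf"  # -> /tmp/docker-conf/config.json is tried second

    cfg = load_general_config()       # {} if no config file exists or it is not valid JSON
    proxies = cfg.get("proxies", {})  # e.g. the client's per-host proxy configuration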
plugins/module_utils/_api/utils/decorators.py (new file, 68 lines)
@@ -0,0 +1,68 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import functools
import typing as t

from .. import errors
from . import utils


if t.TYPE_CHECKING:
    from collections.abc import Callable

    from ..api.client import APIClient

    _Self = t.TypeVar("_Self")
    _P = t.ParamSpec("_P")
    _R = t.TypeVar("_R")


def minimum_version(
    version: str,
) -> Callable[
    [Callable[t.Concatenate[_Self, _P], _R]],
    Callable[t.Concatenate[_Self, _P], _R],
]:
    def decorator(
        f: Callable[t.Concatenate[_Self, _P], _R],
    ) -> Callable[t.Concatenate[_Self, _P], _R]:
        @functools.wraps(f)
        def wrapper(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
            # We use _Self instead of APIClient since this is used for mixins for APIClient.
            # This unfortunately means that self._version does not exist in the mixin,
            # it only exists after mixing in. This is why we ignore types here.
            if utils.version_lt(self._version, version):  # type: ignore
                raise errors.InvalidVersion(
                    f"{f.__name__} is not available for version < {version}"
                )
            return f(self, *args, **kwargs)

        return wrapper

    return decorator


def update_headers(
    f: Callable[t.Concatenate[APIClient, _P], _R],
) -> Callable[t.Concatenate[APIClient, _P], _R]:
    def inner(self: APIClient, *args: _P.args, **kwargs: _P.kwargs) -> _R:
        if "HttpHeaders" in self._general_configs:
            if not kwargs.get("headers"):
                kwargs["headers"] = self._general_configs["HttpHeaders"]
            else:
                # We cannot (yet) model that kwargs["headers"] should be a dictionary
                kwargs["headers"].update(self._general_configs["HttpHeaders"])  # type: ignore
        return f(self, *args, **kwargs)

    return inner
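Both decorators target methods of APIClient mixins: minimum_version() gates an endpoint on the negotiated API version, and update_headers() merges the config file's HttpHeaders into each request. A sketch of how minimum_version() would be applied — the mixin and version number here are invented:

    # Hypothetical mixin; self._version exists once mixed into APIClient.
    class HypotheticalMixin:
        @minimum_version("1.41")
        def some_endpoint(self) -> bool:
            # Only reached when the negotiated API version is >= 1.41;
            # otherwise the wrapper raises errors.InvalidVersion.
            return True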
plugins/module_utils/_api/utils/fnmatch.py (new file, 129 lines)
@@ -0,0 +1,129 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

"""Filename matching with shell patterns.

fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.

The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.

The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""

from __future__ import annotations

import re


__all__ = ["fnmatch", "fnmatchcase", "translate"]

_cache: dict[str, re.Pattern] = {}
_MAXCACHE = 100


def _purge() -> None:
    """Clear the pattern cache"""
    _cache.clear()


def fnmatch(name: str, pat: str) -> bool:
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    * matches everything
    ? matches any single character
    [seq] matches any character in seq
    [!seq] matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you do not want this, use fnmatchcase(FILENAME, PATTERN).
    """

    name = name.lower()
    pat = pat.lower()
    return fnmatchcase(name, pat)


def fnmatchcase(name: str, pat: str) -> bool:
    """Test whether FILENAME matches PATTERN, including case.
    This is a version of fnmatch() which does not case-normalize
    its arguments.
    """

    try:
        re_pat = _cache[pat]
    except KeyError:
        res = translate(pat)
        if len(_cache) >= _MAXCACHE:
            _cache.clear()
        _cache[pat] = re_pat = re.compile(res)
    return re_pat.match(name) is not None


def translate(pat: str) -> str:
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """
    i, n = 0, len(pat)
    res = "^"
    while i < n:
        c = pat[i]
        i = i + 1
        if c == "*":
            if i < n and pat[i] == "*":
                # is some flavor of "**"
                i = i + 1
                # Treat **/ as ** so eat the "/"
                if i < n and pat[i] == "/":
                    i = i + 1
                if i >= n:
                    # is "**EOF" - to align with .gitignore just accept all
                    res = res + ".*"
                else:
                    # is "**"
                    # Note that this allows for any # of /'s (even 0) because
                    # the .* will eat everything, even /'s
                    res = res + "(.*/)?"
            else:
                # is "*" so map it to anything but "/"
                res = res + "[^/]*"
        elif c == "?":
            # "?" is any char except "/"
            res = res + "[^/]"
        elif c == "[":
            j = i
            if j < n and pat[j] == "!":
                j = j + 1
            if j < n and pat[j] == "]":
                j = j + 1
            while j < n and pat[j] != "]":
                j = j + 1
            if j >= n:
                res = res + "\\["
            else:
                stuff = pat[i:j].replace("\\", "\\\\")
                i = j + 1
                if stuff[0] == "!":
                    stuff = "^" + stuff[1:]
                elif stuff[0] == "^":
                    stuff = "\\" + stuff
                res = f"{res}[{stuff}]"
        else:
            res = res + re.escape(c)

    return res + "$"
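Unlike the stdlib fnmatch, this translation is anchored and treats "/" specially, so "*" and "?" never cross directory boundaries while "**" does — the .dockerignore behavior. A few concrete results of the functions above:

    print(translate("*.py"))             # ^[^/]*\.py$   -- "*" stops at "/"
    print(translate("**/foo"))           # ^(.*/)?foo$   -- "**" spans any number of directories
    print(fnmatch("a/b/foo", "**/foo"))  # True
    print(fnmatch("a/b/foo", "*/foo"))   # False ("*" does not cross "/")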
plugins/module_utils/_api/utils/json_stream.py (new file, 101 lines)
@@ -0,0 +1,101 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import json.decoder
import typing as t

from ..errors import StreamParseError


if t.TYPE_CHECKING:
    import re
    from collections.abc import Callable

    _T = t.TypeVar("_T")


json_decoder = json.JSONDecoder()


def stream_as_text(stream: t.Generator[bytes | str]) -> t.Generator[str]:
    """
    Given a stream of bytes or text, if any of the items in the stream
    are bytes convert them to text.
    This function can be removed once we return text streams
    instead of byte streams.
    """
    for data in stream:
        if not isinstance(data, str):
            data = data.decode("utf-8", "replace")
        yield data


def json_splitter(buffer: str) -> tuple[t.Any, str] | None:
    """Attempt to parse a json object from a buffer. If there is at least one
    object, return it and the rest of the buffer, otherwise return None.
    """
    buffer = buffer.strip()
    try:
        obj, index = json_decoder.raw_decode(buffer)
        ws: re.Pattern = json.decoder.WHITESPACE  # type: ignore[attr-defined]
        m = ws.match(buffer, index)
        rest = buffer[m.end() :] if m else buffer[index:]
        return obj, rest
    except ValueError:
        return None


def json_stream(stream: t.Generator[str | bytes]) -> t.Generator[t.Any]:
    """Given a stream of text, return a stream of json objects.
    This handles streams which are inconsistently buffered (some entries may
    be newline delimited, and others are not).
    """
    return split_buffer(stream, json_splitter, json_decoder.decode)


def line_splitter(buffer: str, separator: str = "\n") -> tuple[str, str] | None:
    index = buffer.find(str(separator))
    if index == -1:
        return None
    return buffer[: index + 1], buffer[index + 1 :]


def split_buffer(
    stream: t.Generator[str | bytes],
    splitter: Callable[[str], tuple[_T, str] | None],
    decoder: Callable[[str], _T],
) -> t.Generator[_T | str]:
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.
    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    buffered = ""

    for data in stream_as_text(stream):
        buffered += data
        while True:
            buffer_split = splitter(buffered)
            if buffer_split is None:
                break

            item, buffered = buffer_split
            yield item

    if buffered:
        try:
            yield decoder(buffered)
        except Exception as e:
            raise StreamParseError(e) from e
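json_stream() is what lets the client consume the engine's progress output even when HTTP chunking splits or merges JSON documents. A small sketch with an in-memory generator — the input chunks are invented, deliberately split mid-object:

    chunks = iter(['{"status": "Pulling"}{"status":', ' "Downloading"}\n'])

    for obj in json_stream(chunks):
        print(obj)
    # {'status': 'Pulling'}
    # {'status': 'Downloading'}

The first object is parsed as soon as raw_decode() succeeds on the buffer; the incomplete tail is kept buffered until the next chunk completes it.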
plugins/module_utils/_api/utils/ports.py (new file, 137 lines)
@@ -0,0 +1,137 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import re
import typing as t


if t.TYPE_CHECKING:
    from collections.abc import Collection, Sequence


PORT_SPEC = re.compile(
    "^"  # Match full string
    "("  # External part
    r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?"  # Address
    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
    ")?"
    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
    "(?P<proto>/(udp|tcp|sctp))?"  # Protocol
    "$"  # Match full string
)


def add_port_mapping(
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
    internal_port: str,
    external: str | tuple[str, str | None] | None,
) -> None:
    if internal_port in port_bindings:
        port_bindings[internal_port].append(external)
    else:
        port_bindings[internal_port] = [external]


def add_port(
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
    internal_port_range: list[str],
    external_range: list[str] | list[tuple[str, str | None]] | None,
) -> None:
    if external_range is None:
        for internal_port in internal_port_range:
            add_port_mapping(port_bindings, internal_port, None)
    else:
        for internal_port, external_port in zip(internal_port_range, external_range):
            # mypy loses the exact type of external_port elements for some reason...
            add_port_mapping(port_bindings, internal_port, external_port)  # type: ignore


def build_port_bindings(
    ports: Collection[str],
) -> dict[str, list[str | tuple[str, str | None] | None]]:
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]] = {}
    for port in ports:
        internal_port_range, external_range = split_port(port)
        add_port(port_bindings, internal_port_range, external_range)
    return port_bindings


def _raise_invalid_port(port: str) -> t.NoReturn:
    raise ValueError(
        f'Invalid port "{port}", should be '
        "[[remote_ip:]remote_port[-remote_port]:]"
        "port[/protocol]"
    )


@t.overload
def port_range(
    start: str,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str]: ...


@t.overload
def port_range(
    start: str | None,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str] | None: ...


def port_range(
    start: str | None,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str] | None:
    if start is None:
        return start
    if end is None:
        return [f"{start}{proto}"]
    if randomly_available_port:
        return [f"{start}-{end}{proto}"]
    return [f"{port}{proto}" for port in range(int(start), int(end) + 1)]


def split_port(
    port: str | int,
) -> tuple[list[str], list[str] | list[tuple[str, str | None]] | None]:
    port = str(port)
    match = PORT_SPEC.match(port)
    if match is None:
        _raise_invalid_port(port)
    parts = match.groupdict()

    host: str | None = parts["host"]
    proto: str = parts["proto"] or ""
    int_p: str = parts["int"]
    ext_p: str = parts["ext"]
    internal: list[str] = port_range(int_p, parts["int_end"], proto)  # type: ignore
    external = port_range(ext_p or None, parts["ext_end"], "", len(internal) == 1)

    if host is None:
        if (external is not None and len(internal) != len(external)) or ext_p == "":
            raise ValueError("Port ranges don't match in length")
        return internal, external
    external_or_none: Sequence[str | None]
    if not external:
        external_or_none = [None] * len(internal)
    else:
        external_or_none = external
    if len(internal) != len(external_or_none):
        raise ValueError("Port ranges don't match in length")
    return internal, [(host, ext_port) for ext_port in external_or_none]
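split_port() accepts the same specifications as `docker run -p` and returns a tuple of (internal_ports, external_bindings). A few concrete results of the code above:

    print(split_port("80"))                   # (['80'], None)
    print(split_port("8080:80"))              # (['80'], ['8080'])
    print(split_port("127.0.0.1:8080:80"))    # (['80'], [('127.0.0.1', '8080')])
    print(split_port("8080-8081:80-81/udp"))  # (['80/udp', '81/udp'], ['8080', '8081'])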
plugins/module_utils/_api/utils/proxy.py (new file, 98 lines)
@@ -0,0 +1,98 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from .utils import format_environment


class ProxyConfig(dict):
    """
    Hold the client's proxy configuration
    """

    @property
    def http(self) -> str | None:
        return self.get("http")

    @property
    def https(self) -> str | None:
        return self.get("https")

    @property
    def ftp(self) -> str | None:
        return self.get("ftp")

    @property
    def no_proxy(self) -> str | None:
        return self.get("no_proxy")

    @staticmethod
    def from_dict(config: dict[str, str]) -> ProxyConfig:
        """
        Instantiate a new ProxyConfig from a dictionary that represents a
        client configuration, as described in `the documentation`_.

        .. _the documentation:
            https://docs.docker.com/network/proxy/#configure-the-docker-client
        """
        return ProxyConfig(
            http=config.get("httpProxy"),
            https=config.get("httpsProxy"),
            ftp=config.get("ftpProxy"),
            no_proxy=config.get("noProxy"),
        )

    def get_environment(self) -> dict[str, str]:
        """
        Return a dictionary representing the environment variables used to
        set the proxy settings.
        """
        env = {}
        if self.http:
            env["http_proxy"] = env["HTTP_PROXY"] = self.http
        if self.https:
            env["https_proxy"] = env["HTTPS_PROXY"] = self.https
        if self.ftp:
            env["ftp_proxy"] = env["FTP_PROXY"] = self.ftp
        if self.no_proxy:
            env["no_proxy"] = env["NO_PROXY"] = self.no_proxy
        return env

    @t.overload
    def inject_proxy_environment(self, environment: list[str]) -> list[str]: ...

    @t.overload
    def inject_proxy_environment(
        self, environment: list[str] | None
    ) -> list[str] | None: ...

    def inject_proxy_environment(
        self, environment: list[str] | None
    ) -> list[str] | None:
        """
        Given a list of strings representing environment variables, prepend the
        environment variables corresponding to the proxy settings.
        """
        if not self:
            return environment

        proxy_env = format_environment(self.get_environment())
        if not environment:
            return proxy_env
        # It is important to prepend our variables, because we want the
        # variables defined in "environment" to take precedence.
        return proxy_env + environment

    def __str__(self) -> str:
        return f"ProxyConfig(http={self.http}, https={self.https}, ftp={self.ftp}, no_proxy={self.no_proxy})"
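ProxyConfig mirrors the `proxies` section of ~/.docker/config.json. A short sketch of the round trip — the proxy URL and variable values are invented:

    cfg = ProxyConfig.from_dict({"httpProxy": "http://proxy.example:3128", "noProxy": "localhost"})

    print(cfg.inject_proxy_environment(["FOO=bar"]))
    # ['http_proxy=http://proxy.example:3128', 'HTTP_PROXY=http://proxy.example:3128',
    #  'no_proxy=localhost', 'NO_PROXY=localhost', 'FOO=bar']

The proxy variables are prepended so that entries already present in the given environment win on conflict.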
plugins/module_utils/_api/utils/socket.py (new file, 243 lines)
@@ -0,0 +1,243 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import errno
import os
import select
import socket as pysocket
import struct
import typing as t

from ..transport.npipesocket import NpipeSocket


if t.TYPE_CHECKING:
    from collections.abc import Sequence

    from ..._socket_helper import SocketLike


STDOUT = 1
STDERR = 2


class SocketError(Exception):
    pass


# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109


def read(socket: SocketLike, n: int = 4096) -> bytes | None:
    """
    Reads at most n bytes from socket
    """

    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)

    if not isinstance(socket, NpipeSocket):  # type: ignore[unreachable]
        if not hasattr(select, "poll"):
            # Limited to 1024
            select.select([socket], [], [])
        else:
            poll = select.poll()
            poll.register(socket, select.POLLIN | select.POLLPRI)
            poll.poll()

    try:
        if hasattr(socket, "recv"):
            return socket.recv(n)
        if isinstance(socket, pysocket.SocketIO):  # type: ignore
            return socket.read(n)  # type: ignore[unreachable]
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
            raise
        return None  # TODO ???
    except Exception as e:
        is_pipe_ended = (
            isinstance(socket, NpipeSocket)  # type: ignore[unreachable]
            and len(e.args) > 0
            and e.args[0] == NPIPE_ENDED
        )
        if is_pipe_ended:
            # npipes do not support duplex sockets, so we interpret
            # a PIPE_ENDED error as a close operation (0-length read).
            return b""
        raise


def read_exactly(socket: SocketLike, n: int) -> bytes:
    """
    Reads exactly n bytes from socket
    Raises SocketError if there is not enough data
    """
    data = b""
    while len(data) < n:
        next_data = read(socket, n - len(data))
        if not next_data:
            raise SocketError("Unexpected EOF")
        data += next_data
    return data


def next_frame_header(socket: SocketLike) -> tuple[int, int]:
    """
    Returns the stream and size of the next frame of data waiting to be read
    from socket, according to the protocol defined here:

    https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
    """
    try:
        data = read_exactly(socket, 8)
    except SocketError:
        return (-1, -1)

    stream, actual = struct.unpack(">BxxxL", data)
    return (stream, actual)


def frames_iter(socket: SocketLike, tty: bool) -> t.Generator[tuple[int, bytes]]:
    """
    Return a generator of frames read from socket. A frame is a tuple where
    the first item is the stream number and the second item is a chunk of data.

    If the tty setting is enabled, the streams are multiplexed into the stdout
    stream.
    """
    if tty:
        return ((STDOUT, frame) for frame in frames_iter_tty(socket))
    return frames_iter_no_tty(socket)


def frames_iter_no_tty(socket: SocketLike) -> t.Generator[tuple[int, bytes]]:
    """
    Returns a generator of data read from the socket when the tty setting is
    not enabled.
    """
    while True:
        (stream, n) = next_frame_header(socket)
        if n < 0:
            break
        while n > 0:
            result = read(socket, n)
            if result is None:
                continue
            data_length = len(result)
            if data_length == 0:
                # We have reached EOF
                return
            n -= data_length
            yield (stream, result)


def frames_iter_tty(socket: SocketLike) -> t.Generator[bytes]:
    """
    Return a generator of data read from the socket when the tty setting is
    enabled.
    """
    while True:
        result = read(socket)
        if not result:
            # We have reached EOF
            return
        yield result


@t.overload
def consume_socket_output(
    frames: Sequence[bytes] | t.Generator[bytes], demux: t.Literal[False] = False
) -> bytes: ...


@t.overload
def consume_socket_output(
    frames: (
        Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: t.Literal[True],
) -> tuple[bytes, bytes]: ...


@t.overload
def consume_socket_output(
    frames: (
        Sequence[bytes]
        | Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[bytes]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: bool = False,
) -> bytes | tuple[bytes, bytes]: ...


def consume_socket_output(
    frames: (
        Sequence[bytes]
        | Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[bytes]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: bool = False,
) -> bytes | tuple[bytes, bytes]:
    """
    Iterate through frames read from the socket and return the result.

    Args:

        demux (bool):
            If False, stdout and stderr are multiplexed, and the result is the
            concatenation of all the frames. If True, the streams are
            demultiplexed, and the result is a 2-tuple where each item is the
            concatenation of frames belonging to the same stream.
    """
    if demux is False:
        # If the streams are multiplexed, the generator yields byte strings
        # that we just need to concatenate.
        return b"".join(frames)  # type: ignore

    # If the streams are demultiplexed, the generator yields tuples
    # (stdout, stderr)
    out: list[bytes | None] = [None, None]
    frame: tuple[bytes | None, bytes | None]
    for frame in frames:  # type: ignore
        # It is guaranteed that for each frame, one and only one stream
        # is not None.
        if frame == (None, None):
            raise AssertionError(f"frame must not be (None, None), but got {frame}")
        if frame[0] is not None:
            if out[0] is None:
                out[0] = frame[0]
            else:
                out[0] += frame[0]
        else:
            if out[1] is None:
                out[1] = frame[1]
            else:
                out[1] += frame[1]  # type: ignore[operator]
    return tuple(out)  # type: ignore


def demux_adaptor(stream_id: int, data: bytes) -> tuple[bytes | None, bytes | None]:
    """
    Utility to demultiplex stdout and stderr when reading frames from the
    socket.
    """
    if stream_id == STDOUT:
        return (data, None)
    if stream_id == STDERR:
        return (None, data)
    raise ValueError(f"{stream_id} is not a valid stream")
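The 8-byte multiplexing header that next_frame_header() expects is [stream_id, 0, 0, 0, size_be32], matching the attach protocol linked above. A self-contained sketch using a socketpair — POSIX-only and invented purely for illustration:

    import socket as pysocket
    import struct

    a, b = pysocket.socketpair()
    # One STDOUT frame carrying a 5-byte payload.
    a.sendall(struct.pack(">BxxxL", STDOUT, 5) + b"hello")
    a.close()

    print(next_frame_header(b))  # (1, 5)
    print(read_exactly(b, 5))    # b'hello'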
plugins/module_utils/_api/utils/utils.py (new file, 520 lines)
@@ -0,0 +1,520 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import base64
import collections
import json
import os
import os.path
import shlex
import string
import typing as t
from urllib.parse import urlparse, urlunparse

from ansible_collections.community.docker.plugins.module_utils._version import (
    StrictVersion,
)

from .. import errors
from ..constants import (
    BYTE_UNITS,
    DEFAULT_HTTP_HOST,
    DEFAULT_NPIPE,
    DEFAULT_UNIX_SOCKET,
)
from ..tls import TLSConfig


if t.TYPE_CHECKING:
    from collections.abc import Mapping, Sequence


URLComponents = collections.namedtuple(
    "URLComponents",
    "scheme netloc url params query fragment",
)


def decode_json_header(header: str | bytes) -> dict[str, t.Any]:
    data = base64.b64decode(header).decode("utf-8")
    return json.loads(data)


def compare_version(v1: str, v2: str) -> t.Literal[-1, 0, 1]:
    """Compare docker versions

    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    s1 = StrictVersion(v1)
    s2 = StrictVersion(v2)
    if s1 == s2:
        return 0
    if s1 > s2:
        return -1
    return 1


def version_lt(v1: str, v2: str) -> bool:
    return compare_version(v1, v2) > 0


def version_gte(v1: str, v2: str) -> bool:
    return not version_lt(v1, v2)


def _convert_port_binding(
    binding: (
        tuple[str, str | int | None]
        | tuple[str | int | None]
        | dict[str, str]
        | str
        | int
    ),
) -> dict[str, str]:
    result = {"HostIp": "", "HostPort": ""}
    host_port: str | int | None = ""
    if isinstance(binding, tuple):
        if len(binding) == 2:
            host_port = binding[1]  # type: ignore
            result["HostIp"] = binding[0]
        elif isinstance(binding[0], str):
            result["HostIp"] = binding[0]
        else:
            host_port = binding[0]
    elif isinstance(binding, dict):
        if "HostPort" in binding:
            host_port = binding["HostPort"]
            if "HostIp" in binding:
                result["HostIp"] = binding["HostIp"]
        else:
            raise ValueError(binding)
    else:
        host_port = binding

    result["HostPort"] = str(host_port) if host_port is not None else ""
    return result


def convert_port_bindings(
    port_bindings: dict[
        str | int,
        tuple[str, str | int | None]
        | tuple[str | int | None]
        | dict[str, str]
        | str
        | int
        | list[
            tuple[str, str | int | None]
            | tuple[str | int | None]
            | dict[str, str]
            | str
            | int
        ],
    ],
) -> dict[str, list[dict[str, str]]]:
    result = {}
    for k, v in port_bindings.items():
        key = str(k)
        if "/" not in key:
            key += "/tcp"
        if isinstance(v, list):
            result[key] = [_convert_port_binding(binding) for binding in v]
        else:
            result[key] = [_convert_port_binding(v)]
    return result


def convert_volume_binds(
    binds: (
        list[str]
        | Mapping[
            str | bytes, dict[str, str | bytes] | dict[str, str] | bytes | str | int
        ]
    ),
) -> list[str]:
    if isinstance(binds, list):
        return binds  # type: ignore

    result = []
    for k, v in binds.items():
        if isinstance(k, bytes):
            k = k.decode("utf-8")

        if isinstance(v, dict):
            if "ro" in v and "mode" in v:
                raise ValueError(f'Binding cannot contain both "ro" and "mode": {v!r}')

            bind = v["bind"]
            if isinstance(bind, bytes):
                bind = bind.decode("utf-8")

            if "ro" in v:
                mode = "ro" if v["ro"] else "rw"
            elif "mode" in v:
                mode = v["mode"]  # type: ignore # TODO
            else:
                mode = "rw"

            # NOTE: this is only relevant for Linux hosts
            # (does not apply in Docker Desktop)
            propagation_modes = [
                "rshared",
                "shared",
                "rslave",
                "slave",
                "rprivate",
                "private",
            ]
            if "propagation" in v and v["propagation"] in propagation_modes:
                if mode:
                    mode = ",".join([mode, v["propagation"]])  # type: ignore # TODO
                else:
                    mode = v["propagation"]  # type: ignore # TODO

            result.append(f"{k}:{bind}:{mode}")
        else:
            if isinstance(v, bytes):
                v = v.decode("utf-8")
            result.append(f"{k}:{v}:rw")
    return result


def convert_tmpfs_mounts(tmpfs: dict[str, str] | list[str]) -> dict[str, str]:
    if isinstance(tmpfs, dict):
        return tmpfs

    if not isinstance(tmpfs, list):
        raise ValueError(
            f"Expected tmpfs value to be either a list or a dict, found: {type(tmpfs).__name__}"
        )

    result = {}
    for mount in tmpfs:
        if isinstance(mount, str):
            if ":" in mount:
                name, options = mount.split(":", 1)
            else:
                name = mount
                options = ""

        else:
            raise ValueError(
                f"Expected item in tmpfs list to be a string, found: {type(mount).__name__}"
            )

        result[name] = options
    return result


def convert_service_networks(
    networks: list[str | dict[str, str]],
) -> list[dict[str, str]]:
    if not networks:
        return networks  # type: ignore
    if not isinstance(networks, list):
        raise TypeError("networks parameter must be a list.")

    result = []
    for n in networks:
        if isinstance(n, str):
            n = {"Target": n}
        result.append(n)
    return result


def parse_repository_tag(repo_name: str) -> tuple[str, str | None]:
    parts = repo_name.rsplit("@", 1)
    if len(parts) == 2:
        return tuple(parts)  # type: ignore
    parts = repo_name.rsplit(":", 1)
    if len(parts) == 2 and "/" not in parts[1]:
        return tuple(parts)  # type: ignore
    return repo_name, None


def parse_host(addr: str | None, is_win32: bool = False, tls: bool = False) -> str:
    # Sensible defaults
    if not addr and is_win32:
        return DEFAULT_NPIPE
    if not addr or addr.strip() == "unix://":
        return DEFAULT_UNIX_SOCKET

    addr = addr.strip()

    parsed_url = urlparse(addr)
    proto = parsed_url.scheme
    if not proto or any(x not in string.ascii_letters + "+" for x in proto):
        # https://bugs.python.org/issue754016
        parsed_url = urlparse("//" + addr, "tcp")
        proto = "tcp"

    if proto == "fd":
        raise errors.DockerException("fd protocol is not implemented")

    # These protos are valid aliases for our library but not for the
    # official spec
    if proto in ("http", "https"):
        tls = proto == "https"
        proto = "tcp"
    elif proto == "http+unix":
        proto = "unix"

    if proto not in ("tcp", "unix", "npipe", "ssh"):
        raise errors.DockerException(f"Invalid bind address protocol: {addr}")

    if proto == "tcp" and not parsed_url.netloc:
        # "tcp://" is exceptionally disallowed by convention;
        # omitting a hostname for other protocols is fine
        raise errors.DockerException(f"Invalid bind address format: {addr}")

    if any(
        [parsed_url.params, parsed_url.query, parsed_url.fragment, parsed_url.password]
    ):
        raise errors.DockerException(f"Invalid bind address format: {addr}")

    if parsed_url.path and proto == "ssh":
        raise errors.DockerException(
            f"Invalid bind address format: no path allowed for this protocol: {addr}"
        )
    path = parsed_url.path
    if proto == "unix" and parsed_url.hostname is not None:
        # For legacy reasons, we consider unix://path
        # to be valid and equivalent to unix:///path
        path = f"{parsed_url.hostname}/{path}"

    netloc = parsed_url.netloc
    if proto in ("tcp", "ssh"):
        port = parsed_url.port or 0
        if port <= 0:
            # Conventional defaults: 22 for SSH, 2376 for TLS-secured Docker, 2375 for plain TCP
            port = 22 if proto == "ssh" else (2376 if tls else 2375)
        netloc = f"{parsed_url.netloc}:{port}"

        if not parsed_url.hostname:
            netloc = f"{DEFAULT_HTTP_HOST}:{port}"

    # Rewrite schemes to fit library internals (requests adapters)
    if proto == "tcp":
        proto = f"http{'s' if tls else ''}"
    elif proto == "unix":
        proto = "http+unix"

    if proto in ("http+unix", "npipe"):
        return f"{proto}://{path}".rstrip("/")
    return urlunparse(
        URLComponents(
            scheme=proto,
            netloc=netloc,
            url=path,
            params="",
            query="",
            fragment="",
        )
    ).rstrip("/")


def parse_devices(devices: Sequence[dict[str, str] | str]) -> list[dict[str, str]]:
    device_list = []
    for device in devices:
        if isinstance(device, dict):
            device_list.append(device)
            continue
        if not isinstance(device, str):
            raise errors.DockerException(f"Invalid device type {type(device)}")
        device_mapping = device.split(":")
        if device_mapping:
            path_on_host = device_mapping[0]
            if len(device_mapping) > 1:
                path_in_container = device_mapping[1]
            else:
                path_in_container = path_on_host
            if len(device_mapping) > 2:
                permissions = device_mapping[2]
            else:
                permissions = "rwm"
            device_list.append(
                {
                    "PathOnHost": path_on_host,
                    "PathInContainer": path_in_container,
                    "CgroupPermissions": permissions,
                }
            )
    return device_list


def kwargs_from_env(
    assert_hostname: bool | None = None,
    environment: Mapping[str, str] | None = None,
) -> dict[str, t.Any]:
    if not environment:
        environment = os.environ
    host = environment.get("DOCKER_HOST")

    # empty string for cert path is the same as unset.
    cert_path = environment.get("DOCKER_CERT_PATH") or None

    # empty string for tls verify counts as "false".
    # Any value or 'unset' counts as true.
    tls_verify_str = environment.get("DOCKER_TLS_VERIFY")
    if tls_verify_str == "":
        tls_verify = False
    else:
        tls_verify = tls_verify_str is not None
    enable_tls = cert_path or tls_verify

    params: dict[str, t.Any] = {}

    if host:
        params["base_url"] = host

    if not enable_tls:
        return params

    if not cert_path:
        cert_path = os.path.join(os.path.expanduser("~"), ".docker")

    if not tls_verify and assert_hostname is None:
        # assert_hostname is a subset of TLS verification,
        # so if it is not set already then set it to false.
        assert_hostname = False

    params["tls"] = TLSConfig(
        client_cert=(
            os.path.join(cert_path, "cert.pem"),
            os.path.join(cert_path, "key.pem"),
        ),
        ca_cert=os.path.join(cert_path, "ca.pem"),
        verify=tls_verify,
        assert_hostname=assert_hostname,
    )

    return params


def convert_filters(
    filters: Mapping[str, bool | str | int | list[int] | list[str] | list[str | int]],
) -> str:
    result = {}
    for k, v in filters.items():
        if isinstance(v, bool):
            v = "true" if v else "false"
        if not isinstance(v, list):
            v = [
                v,
            ]
        result[k] = [str(item) if not isinstance(item, str) else item for item in v]
    return json.dumps(result)


def parse_bytes(s: int | float | str) -> int | float:
    if isinstance(s, (int, float)):
        return s
    if len(s) == 0:
        return 0

    if s[-2:-1].isalpha() and s[-1].isalpha() and (s[-1] == "b" or s[-1] == "B"):
        s = s[:-1]
    units = BYTE_UNITS
    suffix = s[-1].lower()

    # Check if the variable is a string representation of an int
    # without a units part. Assuming that the units are bytes.
    if suffix.isdigit():
        digits_part = s
        suffix = "b"
    else:
        digits_part = s[:-1]

    if suffix in units or suffix.isdigit():
        try:
            digits = float(digits_part)
        except ValueError as exc:
            raise errors.DockerException(
                f"Failed converting the string value for memory ({digits_part}) to an integer."
            ) from exc

        # Reconvert to long for the final result
        s = int(digits * units[suffix])
    else:
        raise errors.DockerException(
            f"The specified value for memory ({s}) should specify the units. The postfix should be one of the `b` `k` `m` `g` characters"
        )

    return s


def normalize_links(links: dict[str, str] | Sequence[tuple[str, str]]) -> list[str]:
    if isinstance(links, dict):
        sorted_links = sorted(links.items())
    else:
        sorted_links = sorted(links)

    return [f"{k}:{v}" if v else k for k, v in sorted_links]


def parse_env_file(env_file: str | os.PathLike) -> dict[str, str]:
    """
    Reads a line-separated environment file.
    The format of each line should be "key=value".
    """
    environment = {}

    with open(env_file, "rt", encoding="utf-8") as f:
        for line in f:
            if line[0] == "#":
                continue

            line = line.strip()
            if not line:
                continue

            parse_line = line.split("=", 1)
            if len(parse_line) == 2:
                k, v = parse_line
                environment[k] = v
            else:
                raise errors.DockerException(
                    f"Invalid line in environment file {env_file}:\n{line}"
                )

    return environment


def split_command(command: str) -> list[str]:
    return shlex.split(command)


def format_environment(environment: Mapping[str, str | bytes | None]) -> list[str]:
    def format_env(key: str, value: str | bytes | None) -> str:
        if value is None:
            return key
        if isinstance(value, bytes):
            value = value.decode("utf-8")

        return f"{key}={value}"

    return [format_env(*var) for var in environment.items()]


def format_extra_hosts(extra_hosts: Mapping[str, str], task: bool = False) -> list[str]:
    # Use format dictated by Swarm API if container is part of a task
    if task:
        return [f"{v} {k}" for k, v in sorted(extra_hosts.items())]

    return [f"{k}:{v}" for k, v in sorted(extra_hosts.items())]
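parse_host() normalizes the many accepted DOCKER_HOST spellings into the schemes the internal requests adapters expect, and parse_repository_tag() splits image references without being confused by registry ports. A few illustrative calls against the code above (the addresses and image names are invented):

    print(parse_host("tcp://127.0.0.1:2376", tls=True))  # https://127.0.0.1:2376
    print(parse_host("unix:///var/run/docker.sock"))     # http+unix:///var/run/docker.sock
    print(parse_repository_tag("ubuntu:22.04"))          # ('ubuntu', '22.04')
    print(parse_repository_tag("registry:5000/ubuntu"))  # ('registry:5000/ubuntu', None)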
plugins/module_utils/_common.py (new file, 556 lines)
@@ -0,0 +1,556 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import os
import platform
import re
import sys
import traceback
import typing as t
from collections.abc import Mapping, Sequence

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE

from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TIMEOUT_SECONDS,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_COMMON_ARGS,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
    update_tls_hostname,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)


HAS_DOCKER_PY_2 = False  # pylint: disable=invalid-name
HAS_DOCKER_PY_3 = False  # pylint: disable=invalid-name
HAS_DOCKER_ERROR: None | str  # pylint: disable=invalid-name
HAS_DOCKER_TRACEBACK: None | str  # pylint: disable=invalid-name
docker_version: str | None  # pylint: disable=invalid-name

try:
    from docker import __version__ as docker_version
    from docker.errors import APIError, TLSParameterError
    from docker.tls import TLSConfig

    if LooseVersion(docker_version) >= LooseVersion("3.0.0"):
        HAS_DOCKER_PY_3 = True  # pylint: disable=invalid-name
        from docker import APIClient as Client
    elif LooseVersion(docker_version) >= LooseVersion("2.0.0"):
        HAS_DOCKER_PY_2 = True  # pylint: disable=invalid-name
        from docker import APIClient as Client
    else:
        from docker import Client  # type: ignore

except ImportError as exc:
    HAS_DOCKER_ERROR = str(exc)  # pylint: disable=invalid-name
    HAS_DOCKER_TRACEBACK = traceback.format_exc()  # pylint: disable=invalid-name
    HAS_DOCKER_PY = False  # pylint: disable=invalid-name
    docker_version = None  # pylint: disable=invalid-name
else:
    HAS_DOCKER_PY = True  # pylint: disable=invalid-name
    HAS_DOCKER_ERROR = None  # pylint: disable=invalid-name
    HAS_DOCKER_TRACEBACK = None  # pylint: disable=invalid-name


try:
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        RequestException,
    )
except ImportError:
    # Either Docker SDK for Python is no longer using requests, or Docker SDK for Python is not around either,
    # or Docker SDK for Python's dependency requests is missing. In any case, define an exception
    # class RequestException so that our code does not break.
    class RequestException(Exception):  # type: ignore
        pass


if t.TYPE_CHECKING:
    from collections.abc import Callable


MIN_DOCKER_VERSION = "2.0.0"


if not HAS_DOCKER_PY:
    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
    class Client:  # type: ignore # noqa: F811, pylint: disable=function-redefined
        def __init__(self, **kwargs: t.Any) -> None:
            pass

    class APIError(Exception):  # type: ignore # noqa: F811, pylint: disable=function-redefined
        pass

    class NotFound(Exception):  # type: ignore # noqa: F811, pylint: disable=function-redefined
        pass


def _get_tls_config(
    fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
    if "assert_hostname" in kwargs and LooseVersion(docker_version) >= LooseVersion(
        "7.0.0b1"
    ):
        assert_hostname = kwargs.pop("assert_hostname")
        if assert_hostname is not None:
            fail_function(
                "tls_hostname is not compatible with Docker SDK for Python 7.0.0+. You are using"
                f" Docker SDK for Python {docker_version}. The tls_hostname option (value: {assert_hostname})"
                " has either been set directly or with the environment variable DOCKER_TLS_HOSTNAME."
                " Make sure it is not set, or switch to an older version of Docker SDK for Python."
            )
    # Filter out all None parameters
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function(f"TLS config error: {exc}")


def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
    return auth_data["tls_verify"] or auth_data["tls"]


def get_connect_params(
    auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
    if is_using_tls(auth_data):
        auth_data["docker_host"] = auth_data["docker_host"].replace(
            "tcp://", "https://"
        )

    result = {
        "base_url": auth_data["docker_host"],
        "version": auth_data["api_version"],
        "timeout": auth_data["timeout"],
    }

    if auth_data["tls_verify"]:
        # TLS with verification
        tls_config = {
            "verify": True,
            "assert_hostname": auth_data["tls_hostname"],
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        if auth_data["cacert_path"]:
            tls_config["ca_cert"] = auth_data["cacert_path"]
        result["tls"] = _get_tls_config(**tls_config)
    elif auth_data["tls"]:
        # TLS without verification
        tls_config = {
            "verify": False,
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        result["tls"] = _get_tls_config(**tls_config)

    if auth_data.get("use_ssh_client"):
        if LooseVersion(docker_version) < LooseVersion("4.4.0"):
            fail_function(
                "use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer"
            )
        result["use_ssh_client"] = True

    # No TLS
    return result


DOCKERPYUPGRADE_SWITCH_TO_DOCKER = (
    "Try `pip uninstall docker-py` followed by `pip install docker`."
)
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."


class AnsibleDockerClientBase(Client):
    def __init__(
        self,
        min_docker_version: str | None = None,
        min_docker_api_version: str | None = None,
    ) -> None:
        if min_docker_version is None:
            min_docker_version = MIN_DOCKER_VERSION

        self.docker_py_version = LooseVersion(docker_version)

        if not HAS_DOCKER_PY:
            msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0")
            msg = f"{msg}, for example via `pip install docker`. The error was: {HAS_DOCKER_ERROR}"
            self.fail(msg, exception=HAS_DOCKER_TRACEBACK)

        if self.docker_py_version < LooseVersion(min_docker_version):
            msg = (
                f"Error: Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
                f" Minimum version required is {min_docker_version}."
            )
            if self.docker_py_version < LooseVersion("2.0"):
|
||||
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
|
||||
else:
|
||||
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
|
||||
self.fail(msg)
|
||||
|
||||
        self._connect_params = get_connect_params(
            self.auth_params, fail_function=self.fail
        )

        try:
            super().__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except APIError as exc:
            self.fail(f"Docker API error: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error connecting: {exc}")

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or "1.25"
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail(
                f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
            )

    def log(self, msg: t.Any, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    @staticmethod
    def _get_value(
        param_name: str,
        param_value: t.Any,
        env_variable: str | None,
        default_value: t.Any | None,
        value_type: t.Literal["str", "bool", "int"] = "str",
    ) -> t.Any:
        if param_value is not None:
            # take module parameter value
            if value_type == "bool":
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if value_type == "int":
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == "cert_path":
                    return os.path.join(env_value, "cert.pem")
                if param_name == "cacert_path":
                    return os.path.join(env_value, "ca.pem")
                if param_name == "key_path":
                    return os.path.join(env_value, "key.pem")
                if value_type == "bool":
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if value_type == "int":
                    return int(env_value)
                return env_value

        # take the default
        return default_value

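    # --- Illustrative sketch (not part of the module util) -----------------------
    # _get_value() resolves each option in the order: module parameter ->
    # environment variable -> default. For example, with no docker_host module
    # parameter and DOCKER_HOST=tcp://10.0.0.5:2375 in the environment
    # (hypothetical value):
    #
    #     AnsibleDockerClientBase._get_value(
    #         "docker_host", None, "DOCKER_HOST", DEFAULT_DOCKER_HOST,
    #         value_type="str",
    #     )
    #     # -> "tcp://10.0.0.5:2375" (the env variable wins over the default).
    #     # Note that the *_path options are special-cased to append
    #     # cert.pem/ca.pem/key.pem to the DOCKER_CERT_PATH directory.
    # ------------------------------------------------------------------------------
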
    @abc.abstractmethod
    def _get_params(self) -> dict[str, t.Any]:
        pass

    @property
    def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log("Getting credentials")

        client_params = self._get_params()

        params = {}
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = {
            "docker_host": self._get_value(
                "docker_host",
                params["docker_host"],
                "DOCKER_HOST",
                DEFAULT_DOCKER_HOST,
                value_type="str",
            ),
            "tls_hostname": self._get_value(
                "tls_hostname",
                params["tls_hostname"],
                "DOCKER_TLS_HOSTNAME",
                None,
                value_type="str",
            ),
            "api_version": self._get_value(
                "api_version",
                params["api_version"],
                "DOCKER_API_VERSION",
                "auto",
                value_type="str",
            ),
            "cacert_path": self._get_value(
                "cacert_path",
                params["ca_path"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "cert_path": self._get_value(
                "cert_path",
                params["client_cert"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "key_path": self._get_value(
                "key_path",
                params["client_key"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "tls": self._get_value(
                "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
            ),
            "tls_verify": self._get_value(
                "validate_certs",
                params["validate_certs"],
                "DOCKER_TLS_VERIFY",
                DEFAULT_TLS_VERIFY,
                value_type="bool",
            ),
            "timeout": self._get_value(
                "timeout",
                params["timeout"],
                "DOCKER_TIMEOUT",
                DEFAULT_TIMEOUT_SECONDS,
                value_type="int",
            ),
            "use_ssh_client": self._get_value(
                "use_ssh_client",
                params["use_ssh_client"],
                None,
                False,
                value_type="bool",
            ),
        }

        update_tls_hostname(result)

        return result

    def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon certificate's hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
        self.fail(f"SSL Exception: {error}")

class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: dict[str, Sequence[str]] | None = None,
        min_docker_version: str | None = None,
        min_docker_api_version: str | None = None,
        option_minimal_versions: dict[str, t.Any] | None = None,
        option_minimal_versions_ignore_params: Sequence[str] | None = None,
        fail_results: dict[str, t.Any] | None = None,
    ):
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get("debug")
        self.check_mode = self.module.check_mode

        super().__init__(
            min_docker_version=min_docker_version,
            min_docker_api_version=min_docker_api_version,
        )

        if option_minimal_versions is not None:
            self._get_minimal_versions(
                option_minimal_versions, option_minimal_versions_ignore_params
            )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return self.module.params

    def _get_minimal_versions(
        self,
        option_minimal_versions: dict[str, t.Any],
        ignore_params: Sequence[str] | None = None,
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_py = True
            support_docker_api = True
            if "docker_py_version" in data:
                support_docker_py = self.docker_py_version >= LooseVersion(
                    data["docker_py_version"]
                )
            if "docker_api_version" in data:
                support_docker_api = self.docker_api_version >= LooseVersion(
                    data["docker_api_version"]
                )
            data["supported"] = support_docker_py and support_docker_api
            # Fail if option is not supported but used
            if not data["supported"]:
                # Test whether option is specified
                if "detect_usage" in data:
                    used = data["detect_usage"](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and "default" in self.module.argument_spec[option]:
                        used = (
                            self.module.params[option]
                            != self.module.argument_spec[option]["default"]
                        )
                if used:
                    # If the option is used, compose error message.
                    if "usage_msg" in data:
                        usg = data["usage_msg"]
                    else:
                        usg = f"set {option} option"
                    if not support_docker_api:
                        msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
                    elif not support_docker_py:
                        msg = (
                            f"Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
                            f" Minimum version required is {data['docker_py_version']} to {usg}. {DOCKERPYUPGRADE_UPGRADE_DOCKER}"
                        )
                    else:
                        # should not happen
                        msg = f"Cannot {usg} with your configuration."
                    self.fail(msg)

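    # --- Illustrative sketch (not part of the module util) -----------------------
    # A module typically declares per-option minimum versions like this
    # (option name and versions hypothetical):
    #
    #     client = AnsibleDockerClient(
    #         argument_spec={"cgroupns_mode": {"type": "str"}},
    #         option_minimal_versions={
    #             "cgroupns_mode": {"docker_api_version": "1.41"},
    #         },
    #     )
    #     # If the task sets cgroupns_mode but the daemon only speaks an older
    #     # API, _get_minimal_versions() fails with a descriptive message.
    # ------------------------------------------------------------------------------
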
    def report_warnings(
        self, result: t.Any, warnings_key: Sequence[str] | None = None
    ) -> None:
        """
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        """
        if warnings_key is None:
            warnings_key = ["Warnings"]
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        # Check for str before the generic Sequence check, since a str is itself
        # a Sequence and would otherwise be reported character by character.
        if isinstance(result, str) and result:
            self.module.warn(f"Docker warning: {result}")
        elif isinstance(result, Sequence):
            for warning in result:
                self.module.warn(f"Docker warning: {warning}")
plugins/module_utils/_common_api.py (new file, 731 lines)
@@ -0,0 +1,731 @@
# Copyright 2016 Red Hat | Ansible
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import os
import re
import typing as t
from collections.abc import Mapping, Sequence

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE

from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)


try:
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        RequestException,
        SSLError,
    )
except ImportError:
    # Define an exception class RequestException so that our code does not break.
    class RequestException(Exception):  # type: ignore
        pass


from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
    APIClient as Client,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    MissingRequirementException,
    NotFound,
    TLSParameterError,
)
from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    convert_filters,
    parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TIMEOUT_SECONDS,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_COMMON_ARGS,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
    update_tls_hostname,
)


if t.TYPE_CHECKING:
    from collections.abc import Callable

def _get_tls_config(
    fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function(f"TLS config error: {exc}")


def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
    return auth_data["tls_verify"] or auth_data["tls"]


def get_connect_params(
    auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
    if is_using_tls(auth_data):
        auth_data["docker_host"] = auth_data["docker_host"].replace(
            "tcp://", "https://"
        )

    result = {
        "base_url": auth_data["docker_host"],
        "version": auth_data["api_version"],
        "timeout": auth_data["timeout"],
    }

    if auth_data["tls_verify"]:
        # TLS with verification
        tls_config = {
            "verify": True,
            "assert_hostname": auth_data["tls_hostname"],
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        if auth_data["cacert_path"]:
            tls_config["ca_cert"] = auth_data["cacert_path"]
        result["tls"] = _get_tls_config(**tls_config)
    elif auth_data["tls"]:
        # TLS without verification
        tls_config = {
            "verify": False,
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        result["tls"] = _get_tls_config(**tls_config)

    if auth_data.get("use_ssh_client"):
        result["use_ssh_client"] = True

    # No TLS
    return result

class AnsibleDockerClientBase(Client):
    def __init__(self, min_docker_api_version: str | None = None) -> None:
        self._connect_params = get_connect_params(
            self.auth_params, fail_function=self.fail
        )

        try:
            super().__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except MissingRequirementException as exc:
            self.fail(
                missing_required_lib(exc.requirement), exception=exc.import_exception
            )
        except APIError as exc:
            self.fail(f"Docker API error: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error connecting: {exc}")

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or "1.25"
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail(
                f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
            )

    def log(self, msg: t.Any, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    @staticmethod
    def _get_value(
        param_name: str,
        param_value: t.Any,
        env_variable: str | None,
        default_value: t.Any | None,
        value_type: t.Literal["str", "bool", "int"] = "str",
    ) -> t.Any:
        if param_value is not None:
            # take module parameter value
            if value_type == "bool":
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if value_type == "int":
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == "cert_path":
                    return os.path.join(env_value, "cert.pem")
                if param_name == "cacert_path":
                    return os.path.join(env_value, "ca.pem")
                if param_name == "key_path":
                    return os.path.join(env_value, "key.pem")
                if value_type == "bool":
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if value_type == "int":
                    return int(env_value)
                return env_value

        # take the default
        return default_value

    @abc.abstractmethod
    def _get_params(self) -> dict[str, t.Any]:
        pass

    @property
    def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log("Getting credentials")

        client_params = self._get_params()

        params = {}
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = {
            "docker_host": self._get_value(
                "docker_host",
                params["docker_host"],
                "DOCKER_HOST",
                DEFAULT_DOCKER_HOST,
                value_type="str",
            ),
            "tls_hostname": self._get_value(
                "tls_hostname",
                params["tls_hostname"],
                "DOCKER_TLS_HOSTNAME",
                None,
                value_type="str",
            ),
            "api_version": self._get_value(
                "api_version",
                params["api_version"],
                "DOCKER_API_VERSION",
                "auto",
                value_type="str",
            ),
            "cacert_path": self._get_value(
                "cacert_path",
                params["ca_path"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "cert_path": self._get_value(
                "cert_path",
                params["client_cert"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "key_path": self._get_value(
                "key_path",
                params["client_key"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "tls": self._get_value(
                "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
            ),
            "tls_verify": self._get_value(
                "validate_certs",
                params["validate_certs"],
                "DOCKER_TLS_VERIFY",
                DEFAULT_TLS_VERIFY,
                value_type="bool",
            ),
            "timeout": self._get_value(
                "timeout",
                params["timeout"],
                "DOCKER_TIMEOUT",
                DEFAULT_TIMEOUT_SECONDS,
                value_type="int",
            ),
            "use_ssh_client": self._get_value(
                "use_ssh_client",
                params["use_ssh_client"],
                None,
                False,
                value_type="bool",
            ),
        }

        def depr(*args: t.Any, **kwargs: t.Any) -> None:
            self.deprecate(*args, **kwargs)

        update_tls_hostname(
            result,
            old_behavior=True,
            deprecate_function=depr,
            uses_tls=is_using_tls(result),
        )

        return result

    def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon certificate's hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
        self.fail(f"SSL Exception: {error}")

    def get_container_by_id(self, container_id: str) -> dict[str, t.Any] | None:
        try:
            self.log(f"Inspecting container Id {container_id}")
            result = self.get_json("/containers/{0}/json", container_id)
            self.log("Completed container inspection")
            return result
        except NotFound:
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting container: {exc}")

    def get_container(self, name: str | None) -> dict[str, t.Any] | None:
        """
        Lookup a container and return the inspection results.
        """
        if name is None:
            return None

        search_name = name
        if not name.startswith("/"):
            search_name = "/" + name

        result = None
        try:
            params = {
                "limit": -1,
                "all": 1,
                "size": 0,
                "trunc_cmd": 0,
            }
            containers = self.get_json("/containers/json", params=params)
            for container in containers:
                self.log(f"testing container: {container['Names']}")
                if (
                    isinstance(container["Names"], list)
                    and search_name in container["Names"]
                ):
                    result = container
                    break
                if container["Id"].startswith(name):
                    result = container
                    break
                if container["Id"] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error retrieving container list: {exc}")

        if result is None:
            return None

        return self.get_container_by_id(result["Id"])

    def get_network(
        self, name: str | None = None, network_id: str | None = None
    ) -> dict[str, t.Any] | None:
        """
        Lookup a network and return the inspection results.
        """
        if name is None and network_id is None:
            return None

        result = None

        if network_id is None:
            try:
                networks = self.get_json("/networks")
                for network in networks:
                    self.log(f"testing network: {network['Name']}")
                    if name == network["Name"]:
                        result = network
                        break
                    if network["Id"].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error retrieving network list: {exc}")

        if result is not None:
            network_id = result["Id"]

        if network_id is not None:
            try:
                self.log(f"Inspecting network Id {network_id}")
                result = self.get_json("/networks/{0}", network_id)
                self.log("Completed network inspection")
            except NotFound:
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting network: {exc}")

        return result

    def _image_lookup(self, name: str, tag: str | None) -> list[dict[str, t.Any]]:
        """
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        """
        try:
            params: dict[str, t.Any] = {
                "only_ids": 0,
                "all": 0,
            }
            if LooseVersion(self.api_version) < LooseVersion("1.25"):
                # only use "filter" on API 1.24 and under, as it is deprecated
                params["filter"] = name
            else:
                params["filters"] = convert_filters({"reference": name})
            images = self.get_json("/images/json", params=params)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error searching for image {name} - {exc}")
        if tag:
            lookup = f"{name}:{tag}"
            lookup_digest = f"{name}@{tag}"
            response = images
            images = []
            for image in response:
                tags = image.get("RepoTags")
                digests = image.get("RepoDigests")
                if (tags and lookup in tags) or (digests and lookup_digest in digests):
                    images = [image]
                    break
        return images

    def find_image(self, name: str, tag: str | None) -> dict[str, t.Any] | None:
        """
        Lookup an image (by name and tag) and return the inspection results.
        """
        if not name:
            return None

        self.log(f"Find image {name}:{tag}")
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == "docker.io":
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log(f"Check for docker.io image: {repo_name}")
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith("library/"):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len("library/") :]
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = f"{registry}/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images and "/" not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = f"{registry}/library/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail(f"Daemon returned more than one result for {name}:{tag}")

        if len(images) == 1:
            try:
                return self.get_json("/images/{0}/json", images[0]["Id"])
            except NotFound:
                self.log(f"Image {name}:{tag} not found.")
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting image {name}:{tag} - {exc}")

        self.log(f"Image {name}:{tag} not found.")
        return None

    def find_image_by_id(
        self, image_id: str, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Lookup an image (by ID) and return the inspection results.
        """
        if not image_id:
            return None

        self.log(f"Find image {image_id} (by ID)")
        try:
            return self.get_json("/images/{0}/json", image_id)
        except NotFound as exc:
            if not accept_missing_image:
                self.fail(f"Error inspecting image ID {image_id} - {exc}")
            self.log(f"Image {image_id} not found.")
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting image ID {image_id} - {exc}")

    @staticmethod
    def _compare_images(
        img1: dict[str, t.Any] | None, img2: dict[str, t.Any] | None
    ) -> bool:
        if img1 is None or img2 is None:
            return img1 == img2
        filter_keys = {"Metadata"}
        img1_filtered = {k: v for k, v in img1.items() if k not in filter_keys}
        img2_filtered = {k: v for k, v in img2.items() if k not in filter_keys}
        return img1_filtered == img2_filtered

    def pull_image(
        self, name: str, tag: str = "latest", image_platform: str | None = None
    ) -> tuple[dict[str, t.Any] | None, bool]:
        """
        Pull an image
        """
        self.log(f"Pulling image {name}:{tag}")
        old_image = self.find_image(name, tag)
        try:
            repository, image_tag = parse_repository_tag(name)
            registry, dummy_repo_name = auth.resolve_repository_name(repository)
            params = {
                "tag": tag or image_tag or "latest",
                "fromImage": repository,
            }
            if image_platform is not None:
                params["platform"] = image_platform

            headers = {}
            header = auth.get_config_header(self, registry)
            if header:
                headers["X-Registry-Auth"] = header

            response = self._post(
                self._url("/images/create"),
                params=params,
                headers=headers,
                stream=True,
                timeout=None,
            )
            self._raise_for_status(response)
            for line in self._stream_helper(response, decode=True):
                self.log(line, pretty_print=True)
                if line.get("error"):
                    if line.get("errorDetail"):
                        error_detail = line.get("errorDetail")
                        self.fail(
                            f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}"
                        )
                    else:
                        self.fail(f"Error pulling {name} - {line.get('error')}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error pulling image {name}:{tag} - {exc}")

        new_image = self.find_image(name, tag)

        return new_image, self._compare_images(old_image, new_image)

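# --- Illustrative sketch (not part of the module util) ---------------------------
# pull_image() returns the freshly inspected image together with a comparison
# against the previously present image (image name hypothetical):
#
#     image, unchanged = client.pull_image("alpine", tag="3.20")
#     # `image` is the inspection result (or None), and `unchanged` is True when
#     # the pull did not replace the local image (_compare_images ignores the
#     # Metadata key), letting callers derive changed semantics.
# ----------------------------------------------------------------------------------
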
class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: dict[str, Sequence[str]] | None = None,
        min_docker_api_version: str | None = None,
        option_minimal_versions: dict[str, t.Any] | None = None,
        option_minimal_versions_ignore_params: Sequence[str] | None = None,
        fail_results: dict[str, t.Any] | None = None,
    ):
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get("debug")
        self.check_mode = self.module.check_mode

        super().__init__(min_docker_api_version=min_docker_api_version)

        if option_minimal_versions is not None:
            self._get_minimal_versions(
                option_minimal_versions, option_minimal_versions_ignore_params
            )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return self.module.params

    def _get_minimal_versions(
        self,
        option_minimal_versions: dict[str, t.Any],
        ignore_params: Sequence[str] | None = None,
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_api = True
            if "docker_api_version" in data:
                support_docker_api = self.docker_api_version >= LooseVersion(
                    data["docker_api_version"]
                )
            data["supported"] = support_docker_api
            # Fail if option is not supported but used
            if not data["supported"]:
                # Test whether option is specified
                if "detect_usage" in data:
                    used = data["detect_usage"](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and "default" in self.module.argument_spec[option]:
                        used = (
                            self.module.params[option]
                            != self.module.argument_spec[option]["default"]
                        )
                if used:
                    # If the option is used, compose error message.
                    if "usage_msg" in data:
                        usg = data["usage_msg"]
                    else:
                        usg = f"set {option} option"
                    if not support_docker_api:
                        msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
                    else:
                        # should not happen
                        msg = f"Cannot {usg} with your configuration."
                    self.fail(msg)

    def report_warnings(
        self, result: t.Any, warnings_key: Sequence[str] | None = None
    ) -> None:
        """
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        """
        if warnings_key is None:
            warnings_key = ["Warnings"]
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        # Check for str before the generic Sequence check, since a str is itself
        # a Sequence and would otherwise be reported character by character.
        if isinstance(result, str) and result:
            self.module.warn(f"Docker warning: {result}")
        elif isinstance(result, Sequence):
            for warning in result:
                self.module.warn(f"Docker warning: {warning}")
plugins/module_utils/_common_cli.py (new file, 490 lines)
@@ -0,0 +1,490 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import json
import shlex
import typing as t

from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._api.auth import (
    resolve_repository_name,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)


if t.TYPE_CHECKING:
    from collections.abc import Mapping, Sequence

DOCKER_COMMON_ARGS = {
    "docker_cli": {"type": "path"},
    "docker_host": {
        "type": "str",
        "fallback": (env_fallback, ["DOCKER_HOST"]),
        "aliases": ["docker_url"],
    },
    "tls_hostname": {
        "type": "str",
        "fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]),
    },
    "api_version": {
        "type": "str",
        "default": "auto",
        "fallback": (env_fallback, ["DOCKER_API_VERSION"]),
        "aliases": ["docker_api_version"],
    },
    "ca_path": {"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]},
    "client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]},
    "client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]},
    "tls": {
        "type": "bool",
        "default": DEFAULT_TLS,
        "fallback": (env_fallback, ["DOCKER_TLS"]),
    },
    "validate_certs": {
        "type": "bool",
        "default": DEFAULT_TLS_VERIFY,
        "fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]),
        "aliases": ["tls_verify"],
    },
    # "debug": {"type": "bool", "default": False},
    "cli_context": {"type": "str"},
}


class DockerException(Exception):
    pass

class AnsibleDockerClientBase:
    docker_api_version_str: str | None
    docker_api_version: LooseVersion | None

    def __init__(
        self,
        common_args: dict[str, t.Any],
        min_docker_api_version: str | None = None,
        needs_api_version: bool = True,
    ) -> None:
        self._environment: dict[str, str] = {}
        if common_args["tls_hostname"]:
            self._environment["DOCKER_TLS_HOSTNAME"] = common_args["tls_hostname"]
        if common_args["api_version"] and common_args["api_version"] != "auto":
            self._environment["DOCKER_API_VERSION"] = common_args["api_version"]
        cli = common_args.get("docker_cli")
        if cli is None:
            try:
                cli = get_bin_path("docker")
            except ValueError:
                self.fail(
                    "Cannot find docker CLI in path. Please provide it explicitly with the docker_cli parameter"
                )
        self._cli = cli
        self._cli_base = [self._cli]
        docker_host = common_args["docker_host"]
        if not docker_host and not common_args["cli_context"]:
            docker_host = DEFAULT_DOCKER_HOST
        if docker_host:
            self._cli_base.extend(["--host", docker_host])
        if common_args["validate_certs"]:
            self._cli_base.append("--tlsverify")
        elif common_args["tls"]:
            self._cli_base.append("--tls")
        if common_args["ca_path"]:
            self._cli_base.extend(["--tlscacert", common_args["ca_path"]])
        if common_args["client_cert"]:
            self._cli_base.extend(["--tlscert", common_args["client_cert"]])
        if common_args["client_key"]:
            self._cli_base.extend(["--tlskey", common_args["client_key"]])
        if common_args["cli_context"]:
            self._cli_base.extend(["--context", common_args["cli_context"]])

        # `--format json` was only added as a shorthand for `--format {{ json . }}` in Docker 23.0
        dummy, self._version, dummy2 = self.call_cli_json(
            "version", "--format", "{{ json . }}", check_rc=True
        )
        self._info: dict[str, t.Any] | None = None

        if needs_api_version:
            # Validate the structure before indexing into it:
            if not isinstance(self._version.get("Server"), dict):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            api_version_string = self._version["Server"].get(
                "ApiVersion"
            ) or self._version["Server"].get("APIVersion")
            if not isinstance(api_version_string, str):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            self.docker_api_version_str = to_text(api_version_string)
            self.docker_api_version = LooseVersion(self.docker_api_version_str)
            min_docker_api_version = min_docker_api_version or "1.25"
            if self.docker_api_version < LooseVersion(min_docker_api_version):
                self.fail(
                    f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
                )
        else:
            self.docker_api_version_str = None
            self.docker_api_version = None
            if min_docker_api_version is not None:
                self.fail(
                    "Internal error: cannot have needs_api_version=False with min_docker_api_version not None"
                )

    def log(self, msg: str, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    def get_cli(self) -> str:
        return self._cli

    def get_version_info(self) -> dict[str, t.Any]:
        return self._version

    def _compose_cmd(self, args: t.Sequence[str]) -> list[str]:
        return self._cli_base + list(args)

    def _compose_cmd_str(self, args: t.Sequence[str]) -> str:
        return " ".join(shlex.quote(a) for a in self._compose_cmd(args))

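    # --- Illustrative sketch (not part of the module util) -----------------------
    # _cli_base collects the global docker flags derived from the module options,
    # so every invocation is just a prefix plus subcommand (host hypothetical):
    #
    #     # with docker_host=tcp://10.0.0.5:2375, _compose_cmd(["ps", "-a"])
    #     # yields ["docker", "--host", "tcp://10.0.0.5:2375", "ps", "-a"],
    #     # and _compose_cmd_str() shell-quotes it for error messages.
    # ------------------------------------------------------------------------------
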
    @abc.abstractmethod
    def call_cli(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
    ) -> tuple[int, bytes, bytes]:
        pass

    def call_cli_json(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
        warn_on_stderr: bool = False,
    ) -> tuple[int, t.Any, bytes]:
        rc, stdout, stderr = self.call_cli(
            *args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
        )
        if warn_on_stderr and stderr:
            self.warn(to_text(stderr))
        try:
            data = json.loads(stdout)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
            )
        return rc, data, stderr

    def call_cli_json_stream(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
        warn_on_stderr: bool = False,
    ) -> tuple[int, list[t.Any], bytes]:
        rc, stdout, stderr = self.call_cli(
            *args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
        )
        if warn_on_stderr and stderr:
            self.warn(to_text(stderr))
        result = []
        try:
            for line in stdout.splitlines():
                line = line.strip()
                if line.startswith(b"{"):
                    result.append(json.loads(line))
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
            )
        return rc, result, stderr

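    # --- Illustrative sketch (not part of the module util) -----------------------
    # call_cli_json() wraps call_cli() and parses stdout as JSON; a typical
    # invocation (subcommand and container name hypothetical):
    #
    #     rc, data, stderr = client.call_cli_json(
    #         "inspect", "my-container", check_rc=True
    #     )
    #     # `data` is the decoded JSON document; parse errors fail the module
    #     # with the composed command line included in the error message.
    # ------------------------------------------------------------------------------
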
    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def warn(self, msg: str) -> None:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    def get_cli_info(self) -> dict[str, t.Any]:
        if self._info is None:
            dummy, self._info, dummy2 = self.call_cli_json(
                "info", "--format", "{{ json . }}", check_rc=True
            )
        return self._info

    def get_client_plugin_info(self, component: str) -> dict[str, t.Any] | None:
        cli_info = self.get_cli_info()
        if not isinstance(cli_info.get("ClientInfo"), dict):
            self.fail(
                "Cannot determine Docker client information. Are you maybe using podman instead of docker?"
            )
        for plugin in cli_info["ClientInfo"].get("Plugins") or []:
            if plugin.get("Name") == component:
                return plugin
        return None

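    # --- Illustrative sketch (not part of the module util) -----------------------
    # get_client_plugin_info() is how callers discover CLI plugins such as
    # compose or buildx (plugin data abbreviated and hypothetical):
    #
    #     plugin = client.get_client_plugin_info("compose")
    #     if plugin is None:
    #         client.fail("docker compose plugin is not installed")
    #     version = plugin.get("Version")  # e.g. "v2.27.0"
    # ------------------------------------------------------------------------------
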
    def _image_lookup(self, name: str, tag: str) -> list[dict[str, t.Any]]:
        """
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        """
        dummy, images, dummy2 = self.call_cli_json_stream(
            "image",
            "ls",
            "--format",
            "{{ json . }}",
            "--no-trunc",
            "--filter",
            f"reference={name}",
            check_rc=True,
        )
        if tag:
            response = images
            images = []
            for image in response:
                if image.get("Tag") == tag or image.get("Digest") == tag:
                    images = [image]
                    break
        return images

    @t.overload
    def find_image(self, name: None, tag: str) -> None: ...

    @t.overload
    def find_image(self, name: str, tag: str) -> dict[str, t.Any] | None: ...

    def find_image(self, name: str | None, tag: str) -> dict[str, t.Any] | None:
        """
        Lookup an image (by name and tag) and return the inspection results.
        """
        if not name:
            return None

        self.log(f"Find image {name}:{tag}")
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = resolve_repository_name(name)
            if registry == "docker.io":
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log(f"Check for docker.io image: {repo_name}")
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith("library/"):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len("library/") :]
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = f"{registry}/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images and "/" not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = f"{registry}/library/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail(f"Daemon returned more than one result for {name}:{tag}")

        if len(images) == 1:
            rc, image, stderr = self.call_cli_json("image", "inspect", images[0]["ID"])
            if not image:
                self.log(f"Image {name}:{tag} not found.")
                return None
            if rc != 0:
                self.fail(f"Error inspecting image {name}:{tag} - {to_text(stderr)}")
            return image[0]

        self.log(f"Image {name}:{tag} not found.")
        return None

    @t.overload
    def find_image_by_id(
        self, image_id: None, accept_missing_image: bool = False
    ) -> None: ...

    @t.overload
    def find_image_by_id(
        self, image_id: str | None, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None: ...

    def find_image_by_id(
        self, image_id: str | None, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Lookup an image (by ID) and return the inspection results.
        """
        if not image_id:
            return None

        self.log(f"Find image {image_id} (by ID)")
        rc, image, stderr = self.call_cli_json("image", "inspect", image_id)
        if not image:
            if not accept_missing_image:
                self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
            self.log(f"Image {image_id} not found.")
            return None
        if rc != 0:
            self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
        return image[0]

class AnsibleModuleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: Mapping[str, Sequence[str]] | None = None,
        min_docker_api_version: str | None = None,
        fail_results: dict[str, t.Any] | None = None,
        needs_api_version: bool = True,
    ) -> None:
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = [
            ("docker_host", "cli_context")
        ]
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = False  # self.module.params['debug']
        self.check_mode = self.module.check_mode
        self.diff = self.module._diff

        common_args = {k: self.module.params[k] for k in DOCKER_COMMON_ARGS}
        super().__init__(
            common_args,
            min_docker_api_version=min_docker_api_version,
            needs_api_version=needs_api_version,
        )

    def call_cli(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
    ) -> tuple[int, bytes, bytes]:
        environment = self._environment.copy()
        if environ_update:
            environment.update(environ_update)
        rc, stdout, stderr = self.module.run_command(
            self._compose_cmd(args),
            binary_data=True,
            check_rc=check_rc,
            cwd=cwd,
            data=data,
            encoding=None,
            environ_update=environment,
            expand_user_and_vars=False,
            ignore_invalid_cwd=False,
        )
        return rc, stdout, stderr

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def warn(self, msg: str) -> None:
        self.module.warn(msg)

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )
plugins/module_utils/_compose_v2.py (new file, 1025 lines)
File diff suppressed because it is too large
plugins/module_utils/_copy.py (new file, 591 lines)
@@ -0,0 +1,591 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import base64
import datetime
import io
import json
import os
import os.path
import shutil
import stat
import tarfile
import typing as t

from ansible.module_utils.common.text.converters import to_bytes, to_text

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    NotFound,
)


if t.TYPE_CHECKING:
    from collections.abc import Callable

    from _typeshed import WriteableBuffer

    from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
        APIClient,
    )

class DockerFileCopyError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class DockerUnexpectedError(DockerFileCopyError):
|
||||
pass
|
||||
|
||||
|
||||
class DockerFileNotFound(DockerFileCopyError):
|
||||
pass
|
||||
|
||||
|
||||

def _put_archive(
    client: APIClient, container: str, path: str, data: bytes | t.Generator[bytes]
) -> bool:
    # data can also be a file object for streaming. This is because _put uses requests's put().
    # See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
    url = client._url("/containers/{0}/archive", container)
    res = client._put(url, params={"path": path}, data=data)
    client._raise_for_status(res)
    return res.status_code == 200
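
# Hypothetical usage sketch (not part of the file): `data` may be raw bytes or a
# generator of byte chunks; requests streams a generator as a chunked upload, so
#     _put_archive(client, "my-container", "/tmp", tar_stream)
# uploads a tar archive without materializing it in memory first.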

def _symlink_tar_creator(
    b_in_path: bytes,
    file_stat: os.stat_result,
    out_file: str | bytes,
    user_id: int,
    group_id: int,
    mode: int | None = None,
    user_name: str | None = None,
) -> bytes:
    if not stat.S_ISLNK(file_stat.st_mode):
        raise DockerUnexpectedError("stat information is not for a symlink")
    bio = io.BytesIO()
    with tarfile.open(
        fileobj=bio, mode="w|", dereference=False, encoding="utf-8"
    ) as tar:
        # Note that without both name (bytes) and arcname (unicode), this either fails for
        # Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
        # form) does it work with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11.
        tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
        tarinfo.uid = user_id
        tarinfo.uname = ""
        if user_name:
            tarinfo.uname = user_name
        tarinfo.gid = group_id
        tarinfo.gname = ""
        tarinfo.mode &= 0o700
        if mode is not None:
            tarinfo.mode = mode
        if not tarinfo.issym():
            raise DockerUnexpectedError("stat information is not for a symlink")
        tar.addfile(tarinfo)
    return bio.getvalue()

def _symlink_tar_generator(
    b_in_path: bytes,
    file_stat: os.stat_result,
    out_file: str | bytes,
    user_id: int,
    group_id: int,
    mode: int | None = None,
    user_name: str | None = None,
) -> t.Generator[bytes]:
    yield _symlink_tar_creator(
        b_in_path, file_stat, out_file, user_id, group_id, mode, user_name
    )

def _regular_file_tar_generator(
    b_in_path: bytes,
    file_stat: os.stat_result,
    out_file: str | bytes,
    user_id: int,
    group_id: int,
    mode: int | None = None,
    user_name: str | None = None,
) -> t.Generator[bytes]:
    if not stat.S_ISREG(file_stat.st_mode):
        raise DockerUnexpectedError("stat information is not for a regular file")
    tarinfo = tarfile.TarInfo()
    tarinfo.name = (
        os.path.splitdrive(to_text(out_file))[1].replace(os.sep, "/").lstrip("/")
    )
    tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
    tarinfo.uid = user_id
    tarinfo.gid = group_id
    tarinfo.size = file_stat.st_size
    tarinfo.mtime = file_stat.st_mtime
    tarinfo.type = tarfile.REGTYPE
    tarinfo.linkname = ""
    if user_name:
        tarinfo.uname = user_name

    tarinfo_buf = tarinfo.tobuf()
    total_size = len(tarinfo_buf)
    yield tarinfo_buf

    size = tarinfo.size
    total_size += size
    with open(b_in_path, "rb") as f:
        while size > 0:
            to_read = min(size, 65536)
            buf = f.read(to_read)
            if not buf:
                break
            size -= len(buf)
            yield buf
    if size:
        # If for some reason the file shrunk, fill up to the announced size with zeros.
        # (If it enlarged, ignore the remainder.)
        yield tarfile.NUL * size

    remainder = tarinfo.size % tarfile.BLOCKSIZE
    if remainder:
        # We need to write a multiple of 512 bytes. Fill up with zeros.
        yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
        total_size += tarfile.BLOCKSIZE - remainder

    # End with two zeroed blocks
    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
    total_size += 2 * tarfile.BLOCKSIZE

    remainder = total_size % tarfile.RECORDSIZE
    if remainder > 0:
        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
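
# Worked example of the padding above (not part of the file): for a 700-byte file,
# the generator yields a 512-byte header (tarfile.BLOCKSIZE), 700 bytes of data,
# 512 - (700 % 512) = 324 zero bytes to reach a block boundary, and two zeroed
# end-of-archive blocks (1024 bytes), for 2560 bytes total; that is then padded
# with zeros up to tarfile.RECORDSIZE (10240 bytes), the record size tar readers expect.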

def _regular_content_tar_generator(
    content: bytes,
    out_file: str | bytes,
    user_id: int,
    group_id: int,
    mode: int,
    user_name: str | None = None,
) -> t.Generator[bytes]:
    tarinfo = tarfile.TarInfo()
    tarinfo.name = (
        os.path.splitdrive(to_text(out_file))[1].replace(os.sep, "/").lstrip("/")
    )
    tarinfo.mode = mode
    tarinfo.uid = user_id
    tarinfo.gid = group_id
    tarinfo.size = len(content)
    tarinfo.mtime = int(datetime.datetime.now().timestamp())
    tarinfo.type = tarfile.REGTYPE
    tarinfo.linkname = ""
    if user_name:
        tarinfo.uname = user_name

    tarinfo_buf = tarinfo.tobuf()
    total_size = len(tarinfo_buf)
    yield tarinfo_buf

    total_size += len(content)
    yield content

    remainder = tarinfo.size % tarfile.BLOCKSIZE
    if remainder:
        # We need to write a multiple of 512 bytes. Fill up with zeros.
        yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
        total_size += tarfile.BLOCKSIZE - remainder

    # End with two zeroed blocks
    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
    total_size += 2 * tarfile.BLOCKSIZE

    remainder = total_size % tarfile.RECORDSIZE
    if remainder > 0:
        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)

def put_file(
    client: APIClient,
    container: str,
    in_path: str,
    out_path: str,
    user_id: int,
    group_id: int,
    mode: int | None = None,
    user_name: str | None = None,
    follow_links: bool = False,
) -> None:
    """Transfer a file from local to Docker container."""
    if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")):
        raise DockerFileNotFound(f"file or module does not exist: {to_text(in_path)}")

    b_in_path = to_bytes(in_path, errors="surrogate_or_strict")

    out_dir, out_file = os.path.split(out_path)

    if follow_links:
        file_stat = os.stat(b_in_path)
    else:
        file_stat = os.lstat(b_in_path)

    if stat.S_ISREG(file_stat.st_mode):
        stream = _regular_file_tar_generator(
            b_in_path,
            file_stat,
            out_file,
            user_id,
            group_id,
            mode=mode,
            user_name=user_name,
        )
    elif stat.S_ISLNK(file_stat.st_mode):
        stream = _symlink_tar_generator(
            b_in_path,
            file_stat,
            out_file,
            user_id,
            group_id,
            mode=mode,
            user_name=user_name,
        )
    else:
        file_part = " referenced by" if follow_links else ""
        raise DockerFileCopyError(
            f"File{file_part} {in_path} is neither a regular file nor a symlink (stat mode {oct(file_stat.st_mode)})."
        )

    ok = _put_archive(client, container, out_dir, stream)
    if not ok:
        raise DockerUnexpectedError(
            f'Unknown error while creating file "{out_path}" in container "{container}".'
        )
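
# Hypothetical usage sketch (not part of the file): copy a local file into a
# container as uid/gid 0 with mode 0644, following local symlinks:
#     put_file(client, "my-container", "/tmp/app.conf", "/etc/app.conf",
#              user_id=0, group_id=0, mode=0o644, follow_links=True)
# The file is wrapped in a single-entry tar stream and uploaded to the directory
# part of out_path via the containers/{id}/archive endpoint.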

def put_file_content(
    client: APIClient,
    container: str,
    content: bytes,
    out_path: str,
    user_id: int,
    group_id: int,
    mode: int,
    user_name: str | None = None,
) -> None:
    """Transfer the given content as a file to a Docker container."""
    out_dir, out_file = os.path.split(out_path)

    stream = _regular_content_tar_generator(
        content, out_file, user_id, group_id, mode, user_name=user_name
    )

    ok = _put_archive(client, container, out_dir, stream)
    if not ok:
        raise DockerUnexpectedError(
            f'Unknown error while creating file "{out_path}" in container "{container}".'
        )

def stat_file(
    client: APIClient,
    container: str,
    in_path: str,
    follow_links: bool = False,
    log: Callable[[str], None] | None = None,
) -> tuple[str, dict[str, t.Any] | None, str | None]:
    """Fetch information on a file from a Docker container to local.

    Return a tuple ``(path, stat_data, link_target)`` where:

    :path: is the resolved path in case ``follow_links=True``;
    :stat_data: is ``None`` if the file does not exist, or a dictionary with fields
        ``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
        ``mtime`` (string), and ``linkTarget`` (string);
    :link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
        and a string with the symlink target otherwise.
    """
    considered_in_paths = set()

    while True:
        if in_path in considered_in_paths:
            raise DockerFileCopyError(
                f"Found infinite symbolic link loop when trying to stat {in_path!r}"
            )
        considered_in_paths.add(in_path)

        if log:
            log(f"FETCH: Stating {in_path!r}")

        response = client._head(
            client._url("/containers/{0}/archive", container),
            params={"path": in_path},
        )
        if response.status_code == 404:
            return in_path, None, None
        client._raise_for_status(response)
        header = response.headers.get("x-docker-container-path-stat")
        try:
            if header is None:
                raise ValueError("x-docker-container-path-stat header not present")
            stat_data = json.loads(base64.b64decode(header))
        except Exception as exc:
            raise DockerUnexpectedError(
                f"When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}"
            ) from exc

        # https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink
        if stat_data["mode"] & (1 << (32 - 5)) != 0:
            link_target = stat_data["linkTarget"]
            if not follow_links:
                return in_path, stat_data, link_target
            in_path = os.path.join(os.path.split(in_path)[0], link_target)
            continue

        return in_path, stat_data, None
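
# Illustration (hypothetical values, not part of the file): the
# x-docker-container-path-stat header carries base64-encoded JSON; decoded it
# looks roughly like
#     {"name": "app.conf", "size": 128, "mode": 420, "mtime": "2024-01-01T00:00:00Z", "linkTarget": ""}
# where mode 420 == 0o644, and symlinks additionally have the ModeSymlink bit
# (1 << 27) set, which is what the check above tests for.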

class _RawGeneratorFileobj(io.RawIOBase):
    def __init__(self, stream: t.Generator[bytes]):
        self._stream = stream
        self._buf = b""

    def readable(self) -> bool:
        return True

    def _readinto_from_buf(self, b: WriteableBuffer, index: int, length: int) -> int:
        cpy = min(length - index, len(self._buf))
        if cpy:
            b[index : index + cpy] = self._buf[:cpy]  # type: ignore  # TODO!
            self._buf = self._buf[cpy:]
            index += cpy
        return index

    def readinto(self, b: WriteableBuffer) -> int:
        index = 0
        length = len(b)  # type: ignore  # TODO!

        index = self._readinto_from_buf(b, index, length)
        if index == length:
            return index

        try:
            self._buf += next(self._stream)
        except StopIteration:
            return index

        return self._readinto_from_buf(b, index, length)

def _stream_generator_to_fileobj(stream: t.Generator[bytes]) -> io.BufferedReader:
    """Given a generator that generates chunks of bytes, create a readable buffered stream."""
    raw = _RawGeneratorFileobj(stream)
    return io.BufferedReader(raw)


_T = t.TypeVar("_T")
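
# Explanatory note (not part of the file): tarfile.open() with mode="r|" needs a
# file-like object exposing read(), while the API client hands back the archive as
# a generator of byte chunks. Wrapping the generator in _RawGeneratorFileobj and
# then io.BufferedReader provides buffered read() semantics on top of the chunk
# stream, e.g.:
#     fileobj = _stream_generator_to_fileobj(chunk_generator)
#     with tarfile.open(fileobj=fileobj, mode="r|") as tar: ...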

def fetch_file_ex(
    client: APIClient,
    container: str,
    in_path: str,
    process_none: Callable[[str], _T],
    process_regular: Callable[[str, tarfile.TarFile, tarfile.TarInfo], _T],
    process_symlink: Callable[[str, tarfile.TarInfo], _T],
    process_other: Callable[[str, tarfile.TarInfo], _T],
    follow_links: bool = False,
    log: Callable[[str], None] | None = None,
) -> _T:
    """Fetch a file (as a tar file entry) from a Docker container to local."""
    considered_in_paths: set[str] = set()

    while True:
        if in_path in considered_in_paths:
            raise DockerFileCopyError(
                f'Found infinite symbolic link loop when trying to fetch "{in_path}"'
            )
        considered_in_paths.add(in_path)

        if log:
            log(f'FETCH: Fetching "{in_path}"')
        try:
            stream = client.get_raw_stream(
                "/containers/{0}/archive",
                container,
                params={"path": in_path},
                headers={"Accept-Encoding": "identity"},
            )
        except NotFound:
            return process_none(in_path)

        with tarfile.open(
            fileobj=_stream_generator_to_fileobj(stream), mode="r|"
        ) as tar:
            symlink_member: tarfile.TarInfo | None = None
            result: _T | None = None
            found = False
            for member in tar:
                if found:
                    raise DockerUnexpectedError(
                        "Received tarfile contains more than one file!"
                    )
                found = True
                if member.issym():
                    symlink_member = member
                    continue
                if member.isfile():
                    result = process_regular(in_path, tar, member)
                    continue
                result = process_other(in_path, member)
            if symlink_member:
                if not follow_links:
                    return process_symlink(in_path, symlink_member)
                in_path = os.path.join(
                    os.path.split(in_path)[0], symlink_member.linkname
                )
                if log:
                    log(f'FETCH: Following symbolic link to "{in_path}"')
                continue
            if found:
                return result  # type: ignore
            raise DockerUnexpectedError("Received tarfile is empty!")

def fetch_file(
    client: APIClient,
    container: str,
    in_path: str,
    out_path: str,
    follow_links: bool = False,
    log: Callable[[str], None] | None = None,
) -> str:
    b_out_path = to_bytes(out_path, errors="surrogate_or_strict")

    def process_none(in_path: str) -> str:
        raise DockerFileNotFound(
            f"File {in_path} does not exist in container {container}"
        )

    def process_regular(
        in_path: str, tar: tarfile.TarFile, member: tarfile.TarInfo
    ) -> str:
        if not follow_links and os.path.exists(b_out_path):
            os.unlink(b_out_path)

        reader = tar.extractfile(member)
        if reader:
            with reader as in_f, open(b_out_path, "wb") as out_f:
                shutil.copyfileobj(in_f, out_f)
        return in_path

    def process_symlink(in_path: str, member: tarfile.TarInfo) -> str:
        if os.path.exists(b_out_path):
            os.unlink(b_out_path)

        os.symlink(member.linkname, b_out_path)
        return in_path

    def process_other(in_path: str, member: tarfile.TarInfo) -> str:
        raise DockerFileCopyError(
            f'Remote file "{in_path}" is not a regular file or a symbolic link'
        )

    return fetch_file_ex(
        client,
        container,
        in_path,
        process_none,
        process_regular,
        process_symlink,
        process_other,
        follow_links=follow_links,
        log=log,
    )
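
# Hypothetical usage sketch (not part of the file): download a file from a
# container, resolving symlinks inside the container first:
#     resolved = fetch_file(client, "my-container", "/etc/app.conf",
#                           "/tmp/app.conf", follow_links=True)
# The return value is the path that was actually fetched after following links.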

def _execute_command(
    client: APIClient,
    container: str,
    command: list[str],
    log: Callable[[str], None] | None = None,
    check_rc: bool = False,
) -> tuple[int, bytes, bytes]:
    if log:
        log(f"Executing {command} in {container}")

    data = {
        "Container": container,
        "User": "",
        "Privileged": False,
        "Tty": False,
        "AttachStdin": False,
        "AttachStdout": True,
        "AttachStderr": True,
        "Cmd": command,
    }

    if "detachKeys" in client._general_configs:
        data["detachKeys"] = client._general_configs["detachKeys"]

    try:
        exec_data = client.post_json_to_json(
            "/containers/{0}/exec", container, data=data
        )
    except NotFound as e:
        raise DockerFileCopyError(f'Could not find container "{container}"') from e
    except APIError as e:
        if e.response is not None and e.response.status_code == 409:
            raise DockerFileCopyError(
                f'Cannot execute command in paused container "{container}"'
            ) from e
        raise
    exec_id = exec_data["Id"]

    data = {"Tty": False, "Detach": False}
    stdout, stderr = client.post_json_to_stream(
        "/exec/{0}/start", exec_id, stream=False, demux=True, tty=False
    )

    result = client.get_json("/exec/{0}/json", exec_id)

    rc: int = result.get("ExitCode") or 0
    stdout = stdout or b""
    stderr = stderr or b""

    if log:
        log(f"Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}")

    if check_rc and rc != 0:
        command_str = " ".join(command)
        raise DockerUnexpectedError(
            f'Obtained unexpected exit code {rc} when running "{command_str}" in {container}.\nSTDOUT: {stdout!r}\nSTDERR: {stderr!r}'
        )

    return rc, stdout, stderr

def determine_user_group(
    client: APIClient, container: str, log: Callable[[str], None] | None = None
) -> tuple[int, int]:
    dummy_rc, stdout, dummy_stderr = _execute_command(
        client, container, ["/bin/sh", "-c", "id -u && id -g"], check_rc=True, log=log
    )

    stdout_lines = stdout.splitlines()
    if len(stdout_lines) != 2:
        raise DockerUnexpectedError(
            f"Expected two-line output to obtain user and group ID for container {container}, but got {len(stdout_lines)} lines:\n{stdout!r}"
        )

    user_id, group_id = stdout_lines
    try:
        return int(user_id), int(group_id)
    except ValueError as exc:
        raise DockerUnexpectedError(
            f"Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got {user_id!r} and {group_id!r} instead"
        ) from exc
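
# Worked example (hypothetical, not part of the file): running
# `/bin/sh -c "id -u && id -g"` as root typically prints b"0\n0\n", so
# splitlines() yields [b"0", b"0"] and determine_user_group() returns (0, 0).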
166  plugins/module_utils/_image_archive.py  Normal file
@@ -0,0 +1,166 @@
# Copyright 2022 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import os
import tarfile

class ImageArchiveManifestSummary:
    """
    Represents data extracted from a manifest.json found in the tar archive output of the
    "docker image save some:tag > some.tar" command.
    """

    def __init__(self, image_id: str, repo_tags: list[str]) -> None:
        """
        :param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json
        :param repo_tags: Docker image names, e.g. ["hello-world:latest"]
        """

        self.image_id = image_id
        self.repo_tags = repo_tags


class ImageArchiveInvalidException(Exception):
    pass

def api_image_id(archive_image_id: str) -> str:
    """
    Accepts an image hash in the format stored in manifest.json, and returns an equivalent identifier
    that represents the same image hash, but in the format presented by the Docker Engine API.

    :param archive_image_id: plain image hash
    :returns: Prefixed hash used by REST api
    """

    return f"sha256:{archive_image_id}"

def load_archived_image_manifest(
    archive_path: str,
) -> list[ImageArchiveManifestSummary] | None:
    """
    Attempts to get image IDs and image names from metadata stored in the image
    archive tar file.

    The tar should contain a file "manifest.json" with an array with one or more entries,
    and every entry should have a Config field with the image ID in its file name, as
    well as a RepoTags list, which typically has only one entry.

    :raises:
        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.

    :param archive_path: Tar file to read
    :return: None, if no file at archive_path, or a list of ImageArchiveManifestSummary objects.
    """

    try:
        # FileNotFoundError does not exist in Python 2
        if not os.path.isfile(archive_path):
            return None

        with tarfile.open(archive_path, "r") as tf:
            try:
                try:
                    reader = tf.extractfile("manifest.json")
                    if reader is None:
                        raise ImageArchiveInvalidException(
                            "Failed to read manifest.json"
                        )
                    with reader as ef:
                        manifest = json.load(ef)
                except ImageArchiveInvalidException:
                    raise
                except Exception as exc:
                    raise ImageArchiveInvalidException(
                        f"Failed to decode and deserialize manifest.json: {exc}"
                    ) from exc

                if len(manifest) == 0:
                    raise ImageArchiveInvalidException(
                        "Expected to have at least one entry in manifest.json but found none"
                    )

                result = []
                for index, meta in enumerate(manifest):
                    try:
                        config_file = meta["Config"]
                    except KeyError as exc:
                        raise ImageArchiveInvalidException(
                            f"Failed to get Config entry from {index + 1}th manifest in manifest.json: {exc}"
                        ) from exc

                    # Extracts hash without 'sha256:' prefix
                    try:
                        # Strip off .json filename extension, leaving just the hash.
                        image_id = os.path.splitext(config_file)[0]
                    except Exception as exc:
                        raise ImageArchiveInvalidException(
                            f"Failed to extract image id from config file name {config_file}: {exc}"
                        ) from exc

                    for prefix in ("blobs/sha256/",):  # Moby 25.0.0, Docker API 1.44
                        if image_id.startswith(prefix):
                            image_id = image_id[len(prefix) :]

                    try:
                        repo_tags = meta["RepoTags"]
                    except KeyError as exc:
                        raise ImageArchiveInvalidException(
                            f"Failed to get RepoTags entry from {index + 1}th manifest in manifest.json: {exc}"
                        ) from exc

                    result.append(
                        ImageArchiveManifestSummary(
                            image_id=image_id, repo_tags=repo_tags
                        )
                    )
                return result

            except ImageArchiveInvalidException:
                raise
            except Exception as exc:
                raise ImageArchiveInvalidException(
                    f"Failed to extract manifest.json from tar file {archive_path}: {exc}"
                ) from exc

    except ImageArchiveInvalidException:
        raise
    except Exception as exc:
        raise ImageArchiveInvalidException(
            f"Failed to open tar file {archive_path}: {exc}"
        ) from exc
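
# Illustration (hypothetical values, not part of the file): a manifest.json inside
# "docker image save" output might look like
#     [{"Config": "blobs/sha256/abcde12345.json",
#       "RepoTags": ["hello-world:latest"],
#       "Layers": ["..."]}]
# The loader above strips the .json extension and the "blobs/sha256/" prefix, so
# this becomes ImageArchiveManifestSummary(image_id="abcde12345",
# repo_tags=["hello-world:latest"]).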

def archived_image_manifest(archive_path: str) -> ImageArchiveManifestSummary | None:
    """
    Attempts to get Image.Id and image name from metadata stored in the image
    archive tar file.

    The tar should contain a file "manifest.json" with an array with a single entry,
    and the entry should have a Config field with the image ID in its file name, as
    well as a RepoTags list, which typically has only one entry.

    :raises:
        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.

    :param archive_path: Tar file to read
    :return: None, if no file at archive_path, or an ImageArchiveManifestSummary whose image ID will not have a sha256: prefix.
    """

    results = load_archived_image_manifest(archive_path)
    if results is None:
        return None
    if len(results) == 1:
        return results[0]
    raise ImageArchiveInvalidException(
        f"Expected to have one entry in manifest.json but found {len(results)}"
    )
212  plugins/module_utils/_logfmt.py  Normal file
@@ -0,0 +1,212 @@
# Copyright (c) 2024, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

"""
Parse go logfmt messages.

See https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc for information on the format.
"""

from __future__ import annotations

import typing as t
from enum import Enum


# The format is defined in https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc
# (look for "EBNFish")


class InvalidLogFmt(Exception):
    pass


class _Mode(Enum):
    GARBAGE = 0
    KEY = 1
    EQUAL = 2
    IDENT_VALUE = 3
    QUOTED_VALUE = 4


_ESCAPE_DICT = {
    '"': '"',
    "\\": "\\",
    "'": "'",
    "/": "/",
    "b": "\b",
    "f": "\f",
    "n": "\n",
    "r": "\r",
    "t": "\t",
}

_HEX_DICT = {
    "0": 0,
    "1": 1,
    "2": 2,
    "3": 3,
    "4": 4,
    "5": 5,
    "6": 6,
    "7": 7,
    "8": 8,
    "9": 9,
    "a": 0xA,
    "b": 0xB,
    "c": 0xC,
    "d": 0xD,
    "e": 0xE,
    "f": 0xF,
    "A": 0xA,
    "B": 0xB,
    "C": 0xC,
    "D": 0xD,
    "E": 0xE,
    "F": 0xF,
}

def _is_ident(cur: str) -> bool:
    return cur > " " and cur not in ('"', "=")

class _Parser:
    def __init__(self, line: str) -> None:
        self.line = line
        self.index = 0
        self.length = len(line)

    def done(self) -> bool:
        return self.index >= self.length

    def cur(self) -> str:
        return self.line[self.index]

    def next(self) -> None:
        self.index += 1

    def prev(self) -> None:
        self.index -= 1

    def parse_unicode_sequence(self) -> str:
        if self.index + 6 > self.length:
            raise InvalidLogFmt("Not enough space for unicode escape")
        if self.line[self.index : self.index + 2] != "\\u":
            raise InvalidLogFmt("Invalid unicode escape start")
        v = 0
        # Accumulate the four hex digits that follow the "\u" prefix.
        for i in range(self.index + 2, self.index + 6):
            v <<= 4
            try:
                v += _HEX_DICT[self.line[i]]
            except KeyError:
                raise InvalidLogFmt(
                    f"Invalid unicode escape digit {self.line[i]!r}"
                ) from None
        self.index += 6
        return chr(v)
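
# Worked example (not part of the file): for line == "\\u0041" with index 0, the
# four hex digits 0, 0, 4, 1 accumulate to 0x41, so parse_unicode_sequence()
# returns "A" and leaves index at 6, just past the escape sequence.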

def parse_line(line: str, logrus_mode: bool = False) -> dict[str, t.Any]:
    result: dict[str, t.Any] = {}
    parser = _Parser(line)
    key: list[str] = []
    value: list[str] = []
    mode = _Mode.GARBAGE

    def handle_kv(has_no_value: bool = False) -> None:
        k = "".join(key)
        v = None if has_no_value else "".join(value)
        result[k] = v
        del key[:]
        del value[:]

    def parse_garbage(cur: str) -> _Mode:
        if _is_ident(cur):
            return _Mode.KEY
        parser.next()
        return _Mode.GARBAGE

    def parse_key(cur: str) -> _Mode:
        if _is_ident(cur):
            key.append(cur)
            parser.next()
            return _Mode.KEY
        if cur == "=":
            parser.next()
            return _Mode.EQUAL
        if logrus_mode:
            raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
        handle_kv(has_no_value=True)
        parser.next()
        return _Mode.GARBAGE

    def parse_equal(cur: str) -> _Mode:
        if _is_ident(cur):
            value.append(cur)
            parser.next()
            return _Mode.IDENT_VALUE
        if cur == '"':
            parser.next()
            return _Mode.QUOTED_VALUE
        handle_kv()
        parser.next()
        return _Mode.GARBAGE

    def parse_ident_value(cur: str) -> _Mode:
        if _is_ident(cur):
            value.append(cur)
            parser.next()
            return _Mode.IDENT_VALUE
        handle_kv()
        parser.next()
        return _Mode.GARBAGE

    def parse_quoted_value(cur: str) -> _Mode:
        if cur == "\\":
            parser.next()
            if parser.done():
                raise InvalidLogFmt("Unterminated escape sequence in quoted string")
            cur = parser.cur()
            if cur in _ESCAPE_DICT:
                value.append(_ESCAPE_DICT[cur])
                parser.next()
            elif cur != "u":
                es = f"\\{cur}"
                raise InvalidLogFmt(f"Unknown escape sequence {es!r}")
            else:
                # parse_unicode_sequence() consumes the whole \uXXXX escape itself,
                # so no extra next() is needed here.
                parser.prev()
                value.append(parser.parse_unicode_sequence())
            return _Mode.QUOTED_VALUE
        if cur == '"':
            handle_kv()
            parser.next()
            return _Mode.GARBAGE
        if cur < " ":
            raise InvalidLogFmt("Control characters in quoted string are not allowed")
        value.append(cur)
        parser.next()
        return _Mode.QUOTED_VALUE

    parsers = {
        _Mode.GARBAGE: parse_garbage,
        _Mode.KEY: parse_key,
        _Mode.EQUAL: parse_equal,
        _Mode.IDENT_VALUE: parse_ident_value,
        _Mode.QUOTED_VALUE: parse_quoted_value,
    }
    while not parser.done():
        mode = parsers[mode](parser.cur())
    if mode == _Mode.KEY and logrus_mode:
        raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
    if mode in (_Mode.KEY, _Mode.EQUAL):
        handle_kv(has_no_value=True)
    elif mode == _Mode.IDENT_VALUE:
        handle_kv()
    elif mode == _Mode.QUOTED_VALUE:
        raise InvalidLogFmt("Unterminated quoted string")
    return result
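
# Hypothetical usage sketch (not part of the file):
#     parse_line('time="2024-01-01T00:00:00Z" level=info msg="hello world" dryRun')
# returns
#     {"time": "2024-01-01T00:00:00Z", "level": "info", "msg": "hello world", "dryRun": None}
# Bare keys map to None; in logrus_mode such keys raise InvalidLogFmt instead.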
Some files were not shown because too many files have changed in this diff