# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import re
import subprocess
from io import StringIO

from dotenv import dotenv_values
from ruamel.yaml import YAML

from .utils.command import Command, default_bin
from .compat import _ensure_path


def flatten(node, parents=None):
    parents = list(parents or [])
    if isinstance(node, str):
        yield (node, parents)
    elif isinstance(node, list):
        for value in node:
            yield from flatten(value, parents=parents)
    elif isinstance(node, dict):
        for key, value in node.items():
            yield (key, parents)
            yield from flatten(value, parents=parents + [key])
    else:
        raise TypeError(node)

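# Illustrative sketch, not part of the original module: the ``x-hierarchy``
# compose extension is a nested list/dict structure, and flatten() yields
# every service name paired with its chain of ancestors. For a hypothetical
# hierarchy [{'conda-cpp': [{'conda-python': ['conda-python-pandas']}]}],
# dict(flatten(...)) evaluates to:
#
#     {'conda-cpp': [],
#      'conda-python': ['conda-cpp'],
#      'conda-python-pandas': ['conda-cpp', 'conda-python']}
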
def _sanitize_command(cmd):
    if isinstance(cmd, list):
        cmd = " ".join(cmd)
    return re.sub(r"\s+", " ", cmd)

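# Illustrative sketch, not part of the original module: _sanitize_command()
# collapses the whitespace of a (possibly multi-line or list-form) compose
# command so it can be forwarded to ``docker run`` as a single string, e.g.
# _sanitize_command(['/bin/bash', '-c', 'pytest   -v']) returns
# '/bin/bash -c pytest -v'.
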
class UndefinedImage(Exception):
    pass


class ComposeConfig:

    def __init__(self, config_path, dotenv_path, compose_bin, params=None):
        config_path = _ensure_path(config_path)
        if dotenv_path:
            dotenv_path = _ensure_path(dotenv_path)
        else:
            dotenv_path = config_path.parent / '.env'
        self._read_env(dotenv_path, params)
        self._read_config(config_path, compose_bin)

    def _read_env(self, dotenv_path, params):
        """
        Read .env and merge it with explicitly passed parameters.
        """
        self.dotenv = dotenv_values(str(dotenv_path))
        if params is None:
            self.params = {}
        else:
            self.params = {k: v for k, v in params.items()
                           if k in self.dotenv}

        # forward the process' environment variables
        self.env = os.environ.copy()
        # set the defaults from the dotenv files
        self.env.update(self.dotenv)
        # override the defaults passed as parameters
        self.env.update(self.params)

        # translate docker's architecture notation to a more widely used one
        arch = self.env.get('ARCH', 'amd64')
        arch_aliases = {
            'amd64': 'x86_64',
            'arm64v8': 'aarch64',
            's390x': 's390x'
        }
        arch_short_aliases = {
            'amd64': 'x64',
            'arm64v8': 'arm64',
            's390x': 's390x'
        }
        self.env['ARCH_ALIAS'] = arch_aliases.get(arch, arch)
        self.env['ARCH_SHORT_ALIAS'] = arch_short_aliases.get(arch, arch)

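    # Illustrative note, not part of the original module: the layering above
    # means explicit params take precedence over the .env defaults, which in
    # turn take precedence over the inherited process environment. With a
    # hypothetical .env containing ARCH=amd64 and params={'ARCH': 'arm64v8'},
    # the merged environment ends up with ARCH=arm64v8, ARCH_ALIAS=aarch64
    # and ARCH_SHORT_ALIAS=arm64.
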
    def _read_config(self, config_path, compose_bin):
        """
        Validate and read the docker-compose.yml
        """
        yaml = YAML()
        with config_path.open() as fp:
            config = yaml.load(fp)

        services = config['services'].keys()
        self.hierarchy = dict(flatten(config.get('x-hierarchy', {})))
        self.with_gpus = config.get('x-with-gpus', [])
        nodes = self.hierarchy.keys()
        errors = []

        for name in self.with_gpus:
            if name not in services:
                errors.append(
                    'Service `{}` is defined in `x-with-gpus` but not in '
                    '`services`'.format(name)
                )
        for name in nodes - services:
            errors.append(
                'Service `{}` is defined in `x-hierarchy` but not in '
                '`services`'.format(name)
            )
        for name in services - nodes:
            errors.append(
                'Service `{}` is defined in `services` but not in '
                '`x-hierarchy`'.format(name)
            )

        # trigger docker-compose's own validation
        compose = Command(compose_bin)
        args = ['--file', str(config_path), 'config']
        result = compose.run(*args, env=self.env, check=False,
                             stderr=subprocess.PIPE, stdout=subprocess.PIPE)

        if result.returncode != 0:
            # forward docker-compose's own error output
            errors += result.stderr.decode().splitlines()

        if errors:
            msg = '\n'.join([' - {}'.format(msg) for msg in errors])
            raise ValueError(
                'Found errors with docker-compose:\n{}'.format(msg)
            )

        rendered_config = StringIO(result.stdout.decode())
        self.path = config_path
        self.config = yaml.load(rendered_config)

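    # Illustrative sketch, not part of the original module: the validation
    # above expects the compose file to carry two custom extension fields,
    # roughly shaped like this (hypothetical service names):
    #
    #     x-with-gpus:
    #       - ubuntu-cuda
    #
    #     x-hierarchy:
    #       - conda-cpp:
    #         - conda-python
    #       - ubuntu-cuda
    #
    #     services:
    #       conda-cpp: ...
    #       conda-python: ...
    #       ubuntu-cuda: ...
    #
    # Every service must appear in `x-hierarchy` and vice versa, otherwise a
    # ValueError listing all mismatches is raised.
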
    def get(self, service_name):
        try:
            service = self.config['services'][service_name]
        except KeyError:
            raise UndefinedImage(service_name)
        service['name'] = service_name
        service['need_gpu'] = service_name in self.with_gpus
        service['ancestors'] = self.hierarchy[service_name]
        return service

    def __getitem__(self, service_name):
        return self.get(service_name)

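# Illustrative sketch, not part of the original module: ComposeConfig.get()
# (and the [] shorthand) returns the rendered compose service definition
# augmented with three extra keys, e.g. for a hypothetical 'conda-python'
# service whose only ancestor is 'conda-cpp':
#
#     config['conda-python']['name']       # 'conda-python'
#     config['conda-python']['ancestors']  # ['conda-cpp']
#     config['conda-python']['need_gpu']   # False
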
class Docker(Command):

    def __init__(self, docker_bin=None):
        self.bin = default_bin(docker_bin, "docker")


class DockerCompose(Command):

    def __init__(self, config_path, dotenv_path=None, compose_bin=None,
                 params=None):
        compose_bin = default_bin(compose_bin, 'docker-compose')
        self.config = ComposeConfig(config_path, dotenv_path, compose_bin,
                                    params)
        self.bin = compose_bin
        self.pull_memory = set()

    def clear_pull_memory(self):
        self.pull_memory = set()

    def _execute_compose(self, *args, **kwargs):
        # execute as a docker compose command
        try:
            result = super().run('--file', str(self.config.path), *args,
                                 env=self.config.env, **kwargs)
            result.check_returncode()
        except subprocess.CalledProcessError as e:
            def formatdict(d, template):
                return '\n'.join(
                    template.format(k, v) for k, v in sorted(d.items())
                )
            msg = (
                "`{cmd}` exited with a non-zero exit code {code}, see the "
                "process log above.\n\nThe docker-compose command was "
                "invoked with the following parameters:\n\nDefaults defined "
                "in .env:\n{dotenv}\n\nArchery was called with:\n{params}"
            )
            raise RuntimeError(
                msg.format(
                    cmd=' '.join(e.cmd),
                    code=e.returncode,
                    dotenv=formatdict(self.config.dotenv,
                                      template='  {}: {}'),
                    params=formatdict(
                        self.config.params, template='  export {}={}'
                    )
                )
            )

    def _execute_docker(self, *args, **kwargs):
        # execute as a plain docker cli command
        try:
            result = Docker().run(*args, **kwargs)
            result.check_returncode()
        except subprocess.CalledProcessError as e:
            raise RuntimeError(
                "{} exited with non-zero exit code {}".format(
                    ' '.join(e.cmd), e.returncode
                )
            )

    def pull(self, service_name, pull_leaf=True, using_docker=False):
        def _pull(service):
            args = ['pull']
            if service['image'] in self.pull_memory:
                return

            if using_docker:
                try:
                    self._execute_docker(*args, service['image'])
                except Exception as e:
                    # better --ignore-pull-failures handling
                    print(e)
            else:
                args.append('--ignore-pull-failures')
                self._execute_compose(*args, service['name'])

            self.pull_memory.add(service['image'])

        service = self.config.get(service_name)
        for ancestor in service['ancestors']:
            _pull(self.config.get(ancestor))
        if pull_leaf:
            _pull(service)

    def build(self, service_name, use_cache=True, use_leaf_cache=True,
              using_docker=False, using_buildx=False):
        def _build(service, use_cache):
            if 'build' not in service:
                # nothing to do
                return

            args = []
            cache_from = list(service.get('build', {}).get('cache_from', []))
            if use_cache:
                for image in cache_from:
                    if image not in self.pull_memory:
                        try:
                            self._execute_docker('pull', image)
                        except Exception as e:
                            print(e)
                        finally:
                            self.pull_memory.add(image)
            else:
                args.append('--no-cache')

            # turn on inline build cache, this is a docker buildx feature
            # used to bundle the image build cache to the pushed image
            # manifest so the build cache can be reused across hosts,
            # documented at
            # https://github.com/docker/buildx#--cache-tonametypetypekeyvalue
            if self.config.env.get('BUILDKIT_INLINE_CACHE') == '1':
                args.extend(['--build-arg', 'BUILDKIT_INLINE_CACHE=1'])

            if using_buildx:
                for k, v in service['build'].get('args', {}).items():
                    args.extend(['--build-arg', '{}={}'.format(k, v)])

                if use_cache:
                    cache_ref = '{}-cache'.format(service['image'])
                    cache_from = 'type=registry,ref={}'.format(cache_ref)
                    cache_to = (
                        'type=registry,ref={},mode=max'.format(cache_ref)
                    )
                    args.extend([
                        '--cache-from', cache_from,
                        '--cache-to', cache_to,
                    ])

                args.extend([
                    '--output', 'type=docker',
                    '-f', service['build']['dockerfile'],
                    '-t', service['image'],
                    service['build'].get('context', '.')
                ])
                self._execute_docker("buildx", "build", *args)
            elif using_docker:
                # better for caching
                for k, v in service['build'].get('args', {}).items():
                    args.extend(['--build-arg', '{}={}'.format(k, v)])
                for img in cache_from:
                    args.append('--cache-from="{}"'.format(img))
                args.extend([
                    '-f', service['build']['dockerfile'],
                    '-t', service['image'],
                    service['build'].get('context', '.')
                ])
                self._execute_docker("build", *args)
            else:
                self._execute_compose("build", *args, service['name'])

        service = self.config.get(service_name)
        # build ancestor services
        for ancestor in service['ancestors']:
            _build(self.config.get(ancestor), use_cache=use_cache)
        # build the leaf/target service
        _build(service, use_cache=use_cache and use_leaf_cache)

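    # Illustrative sketch, not part of the original module: for a
    # hypothetical service whose image is 'org/conda-cpp' and whose
    # dockerfile is ci/docker/conda-cpp.dockerfile, the buildx branch above
    # roughly amounts to:
    #
    #     docker buildx build \
    #         --build-arg arch=amd64 \
    #         --cache-from type=registry,ref=org/conda-cpp-cache \
    #         --cache-to type=registry,ref=org/conda-cpp-cache,mode=max \
    #         --output type=docker \
    #         -f ci/docker/conda-cpp.dockerfile -t org/conda-cpp .
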
    def run(self, service_name, command=None, *, env=None, volumes=None,
            user=None, using_docker=False):
        service = self.config.get(service_name)

        args = []
        if user is not None:
            args.extend(['-u', user])

        if env is not None:
            for k, v in env.items():
                args.extend(['-e', '{}={}'.format(k, v)])

        if volumes is not None:
            for volume in volumes:
                args.extend(['--volume', volume])

        if using_docker or service['need_gpu']:
            # use gpus, requires docker>=19.03
            if service['need_gpu']:
                args.extend(['--gpus', 'all'])

            if service.get('shm_size'):
                args.extend(['--shm-size', service['shm_size']])

            # append env variables from the compose conf
            for k, v in service.get('environment', {}).items():
                args.extend(['-e', '{}={}'.format(k, v)])

            # append volumes from the compose conf
            for v in service.get('volumes', []):
                if not isinstance(v, str):
                    # if not the compact string volume definition
                    v = "{}:{}".format(v['source'], v['target'])
                args.extend(['-v', v])

            # infer whether an interactive shell is desired or not
            if command in ['cmd.exe', 'bash', 'sh', 'powershell']:
                args.append('-it')

            # use the actual docker image name instead of the compose
            # service name, which we generally refer to as the image
            args.append(service['image'])

            # add command from compose if it wasn't overridden
            if command is not None:
                args.append(command)
            else:
                # collapse whitespace in the preformatted compose command
                cmd = _sanitize_command(service.get('command', ''))
                if cmd:
                    args.append(cmd)

            # execute as a plain docker cli command
            self._execute_docker('run', '--rm', *args)
        else:
            # execute as a docker-compose command
            args.append(service_name)
            if command is not None:
                args.append(command)
            self._execute_compose('run', '--rm', *args)

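    # Illustrative sketch, not part of the original module: with
    # using_docker=True (or a GPU-enabled service) the branch above bypasses
    # docker-compose and, for a hypothetical service, roughly amounts to:
    #
    #     docker run --rm --gpus all -e CCACHE_DIR=/ccache \
    #         -v /host/ccache:/ccache -it org/ubuntu-cuda bash
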
    def push(self, service_name, user=None, password=None,
             using_docker=False):
        def _push(service):
            if using_docker:
                return self._execute_docker('push', service['image'])
            else:
                return self._execute_compose('push', service['name'])

        if user is not None:
            try:
                # TODO(kszucs): have an option for a prompt
                self._execute_docker('login', '-u', user, '-p', password)
            except subprocess.CalledProcessError:
                # hide credentials
                msg = ('Failed to push `{}`, check the passed credentials'
                       .format(service_name))
                raise RuntimeError(msg) from None

        service = self.config.get(service_name)
        for ancestor in service['ancestors']:
            _push(self.config.get(ancestor))
        _push(service)

    def images(self):
        return sorted(self.config.hierarchy.keys())
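
# Illustrative sketch, not part of the original module: typical use of the
# classes above, assuming a hypothetical compose file and service name:
#
#     from archery.docker import DockerCompose
#
#     compose = DockerCompose('docker-compose.yml',
#                             params={'ARCH': 'arm64v8'})
#     compose.pull('conda-python', pull_leaf=False)
#     compose.build('conda-python', use_cache=True)
#     compose.run('conda-python', command='pytest -v')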