10 from textwrap
import dedent
12 from .fixtures
import (
21 from pyfakefs
import fake_filesystem
22 from pyfakefs
import fake_filesystem_unittest
24 _cephadm
= import_cephadm()
28 fsid
='00000000-0000-0000-0000-0000deadbeef',
29 mon_host
='[v2:192.168.1.1:3300/0,v1:192.168.1.1:6789/0]'):
31 # minimal ceph.conf for {fsid}
37 class TestCephAdm(object):
39 def test_docker_unit_file(self
):
40 ctx
= _cephadm
.CephadmContext()
41 ctx
.container_engine
= mock_docker()
42 r
= _cephadm
.get_unit_file(ctx
, '9b9d7609-f4d5-4aba-94c8-effa764d96c9')
43 assert 'Requires=docker.service' in r
44 ctx
.container_engine
= mock_podman()
45 r
= _cephadm
.get_unit_file(ctx
, '9b9d7609-f4d5-4aba-94c8-effa764d96c9')
46 assert 'Requires=docker.service' not in r
48 @mock.patch('cephadm.logger')
49 def test_attempt_bind(self
, _logger
):
56 _os_error
.errno
= errno
59 for side_effect
, expected_exception
in (
60 (os_error(errno
.EADDRINUSE
), _cephadm
.PortOccupiedError
),
61 (os_error(errno
.EAFNOSUPPORT
), OSError),
62 (os_error(errno
.EADDRNOTAVAIL
), OSError),
66 _socket
.bind
.side_effect
= side_effect
68 _cephadm
.attempt_bind(ctx
, _socket
, address
, port
)
69 except Exception as e
:
70 assert isinstance(e
, expected_exception
)
72 if expected_exception
is not None:
75 @mock.patch('cephadm.attempt_bind')
76 @mock.patch('cephadm.logger')
77 def test_port_in_use(self
, _logger
, _attempt_bind
):
80 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('0.0.0.0', 9100)) == False
82 _attempt_bind
.side_effect
= _cephadm
.PortOccupiedError('msg')
83 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('0.0.0.0', 9100)) == True
86 os_error
.errno
= errno
.EADDRNOTAVAIL
87 _attempt_bind
.side_effect
= os_error
88 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('0.0.0.0', 9100)) == False
91 os_error
.errno
= errno
.EAFNOSUPPORT
92 _attempt_bind
.side_effect
= os_error
93 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('0.0.0.0', 9100)) == False
95 @mock.patch('cephadm.socket.socket.bind')
96 @mock.patch('cephadm.logger')
97 def test_port_in_use_special_cases(self
, _logger
, _bind
):
98 # port_in_use has special handling for
99 # EAFNOSUPPORT and EADDRNOTAVAIL errno OSErrors.
100 # If we get those specific errors when attempting
101 # to bind to the ip:port we should not say the
105 _os_error
= OSError()
106 _os_error
.errno
= errno
109 _bind
.side_effect
= os_error(errno
.EADDRNOTAVAIL
)
110 in_use
= _cephadm
.port_in_use(None, _cephadm
.EndPoint('1.2.3.4', 10000))
111 assert in_use
== False
113 _bind
.side_effect
= os_error(errno
.EAFNOSUPPORT
)
114 in_use
= _cephadm
.port_in_use(None, _cephadm
.EndPoint('1.2.3.4', 10000))
115 assert in_use
== False
117 # this time, have it raise the actual port taken error
118 # so it should report the port is in use
119 _bind
.side_effect
= os_error(errno
.EADDRINUSE
)
120 in_use
= _cephadm
.port_in_use(None, _cephadm
.EndPoint('1.2.3.4', 10000))
121 assert in_use
== True
123 @mock.patch('cephadm.attempt_bind')
124 @mock.patch('cephadm.logger')
125 def test_port_in_use_with_specific_ips(self
, _logger
, _attempt_bind
):
128 def _fake_attempt_bind(ctx
, s
: socket
.socket
, addr
: str, port
: int) -> None:
129 occupied_error
= _cephadm
.PortOccupiedError('msg')
130 if addr
.startswith('200'):
132 if addr
.startswith('100'):
136 _attempt_bind
.side_effect
= _fake_attempt_bind
138 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('200.0.0.0', 9100)) == True
139 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('100.0.0.0', 9100)) == False
140 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('100.0.0.0', 4567)) == True
141 assert _cephadm
.port_in_use(empty_ctx
, _cephadm
.EndPoint('155.0.0.0', 4567)) == False
143 @mock.patch('socket.socket')
144 @mock.patch('cephadm.logger')
145 def test_check_ip_port_success(self
, _logger
, _socket
):
146 ctx
= _cephadm
.CephadmContext()
147 ctx
.skip_ping_check
= False # enables executing port check with `check_ip_port`
149 for address
, address_family
in (
150 ('0.0.0.0', socket
.AF_INET
),
151 ('::', socket
.AF_INET6
),
154 _cephadm
.check_ip_port(ctx
, _cephadm
.EndPoint(address
, 9100))
158 assert _socket
.call_args
== mock
.call(address_family
, socket
.SOCK_STREAM
)
160 @mock.patch('socket.socket')
161 @mock.patch('cephadm.logger')
162 def test_check_ip_port_failure(self
, _logger
, _socket
):
163 ctx
= _cephadm
.CephadmContext()
164 ctx
.skip_ping_check
= False # enables executing port check with `check_ip_port`
167 _os_error
= OSError()
168 _os_error
.errno
= errno
171 for address
, address_family
in (
172 ('0.0.0.0', socket
.AF_INET
),
173 ('::', socket
.AF_INET6
),
175 for side_effect
, expected_exception
in (
176 (os_error(errno
.EADDRINUSE
), _cephadm
.PortOccupiedError
),
177 (os_error(errno
.EADDRNOTAVAIL
), OSError),
178 (os_error(errno
.EAFNOSUPPORT
), OSError),
181 mock_socket_obj
= mock
.Mock()
182 mock_socket_obj
.bind
.side_effect
= side_effect
183 _socket
.return_value
= mock_socket_obj
185 _cephadm
.check_ip_port(ctx
, _cephadm
.EndPoint(address
, 9100))
186 except Exception as e
:
187 assert isinstance(e
, expected_exception
)
189 if side_effect
is not None:
193 def test_is_not_fsid(self
):
194 assert not _cephadm
.is_fsid('no-uuid')
196 def test_is_fsid(self
):
197 assert _cephadm
.is_fsid('e863154d-33c7-4350-bca5-921e0467e55b')
199 def test__get_parser_image(self
):
200 args
= _cephadm
._parse
_args
(['--image', 'foo', 'version'])
201 assert args
.image
== 'foo'
203 def test_check_required_global_args(self
):
204 ctx
= _cephadm
.CephadmContext()
205 mock_fn
= mock
.Mock()
206 mock_fn
.return_value
= 0
207 require_image
= _cephadm
.require_image(mock_fn
)
209 with pytest
.raises(_cephadm
.Error
, match
='This command requires the global --image option to be set'):
212 ctx
.image
= 'sample-image'
215 @mock.patch('cephadm.logger')
216 def test_parse_mem_usage(self
, _logger
):
217 len, summary
= _cephadm
._parse
_mem
_usage
(0, 'c6290e3f1489,-- / --')
220 def test_CustomValidation(self
):
221 assert _cephadm
._parse
_args
(['deploy', '--name', 'mon.a', '--fsid', 'fsid'])
223 with pytest
.raises(SystemExit):
224 _cephadm
._parse
_args
(['deploy', '--name', 'wrong', '--fsid', 'fsid'])
226 @pytest.mark
.parametrize("test_input, expected", [
228 ("1.6.2-stable2", (1,6,2)),
230 def test_parse_podman_version(self
, test_input
, expected
):
231 assert _cephadm
._parse
_podman
_version
(test_input
) == expected
233 def test_parse_podman_version_invalid(self
):
234 with pytest
.raises(ValueError) as res
:
235 _cephadm
._parse
_podman
_version
('inval.id')
236 assert 'inval' in str(res
.value
)
238 @mock.patch('cephadm.logger')
239 def test_is_ipv6(self
, _logger
):
240 for good
in ("[::1]", "::1",
241 "fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"):
242 assert _cephadm
.is_ipv6(good
)
243 for bad
in ("127.0.0.1",
244 "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffg",
245 "1:2:3:4:5:6:7:8:9", "fd00::1::1", "[fg::1]"):
246 assert not _cephadm
.is_ipv6(bad
)
248 def test_unwrap_ipv6(self
):
249 def unwrap_test(address
, expected
):
250 assert _cephadm
.unwrap_ipv6(address
) == expected
253 ('::1', '::1'), ('[::1]', '::1'),
254 ('[fde4:8dba:82e1:0:5054:ff:fe6a:357]', 'fde4:8dba:82e1:0:5054:ff:fe6a:357'),
255 ('can actually be any string', 'can actually be any string'),
256 ('[but needs to be stripped] ', '[but needs to be stripped] ')]
257 for address
, expected
in tests
:
258 unwrap_test(address
, expected
)
260 def test_wrap_ipv6(self
):
261 def wrap_test(address
, expected
):
262 assert _cephadm
.wrap_ipv6(address
) == expected
265 ('::1', '[::1]'), ('[::1]', '[::1]'),
266 ('fde4:8dba:82e1:0:5054:ff:fe6a:357',
267 '[fde4:8dba:82e1:0:5054:ff:fe6a:357]'),
268 ('myhost.example.com', 'myhost.example.com'),
269 ('192.168.0.1', '192.168.0.1'),
270 ('', ''), ('fd00::1::1', 'fd00::1::1')]
271 for address
, expected
in tests
:
272 wrap_test(address
, expected
)
274 @mock.patch('cephadm.Firewalld', mock_bad_firewalld
)
275 @mock.patch('cephadm.logger')
276 def test_skip_firewalld(self
, _logger
, cephadm_fs
):
278 test --skip-firewalld actually skips changing firewall
281 ctx
= _cephadm
.CephadmContext()
282 with pytest
.raises(Exception):
283 _cephadm
.update_firewalld(ctx
, 'mon')
285 ctx
.skip_firewalld
= True
286 _cephadm
.update_firewalld(ctx
, 'mon')
288 ctx
.skip_firewalld
= False
289 with pytest
.raises(Exception):
290 _cephadm
.update_firewalld(ctx
, 'mon')
292 ctx
= _cephadm
.CephadmContext()
293 ctx
.ssl_dashboard_port
= 8888
294 ctx
.dashboard_key
= None
295 ctx
.dashboard_password_noupdate
= True
296 ctx
.initial_dashboard_password
= 'password'
297 ctx
.initial_dashboard_user
= 'User'
298 with pytest
.raises(Exception):
299 _cephadm
.prepare_dashboard(ctx
, 0, 0, lambda _
, extra_mounts
=None, ___
=None : '5', lambda : None)
301 ctx
.skip_firewalld
= True
302 _cephadm
.prepare_dashboard(ctx
, 0, 0, lambda _
, extra_mounts
=None, ___
=None : '5', lambda : None)
304 ctx
.skip_firewalld
= False
305 with pytest
.raises(Exception):
306 _cephadm
.prepare_dashboard(ctx
, 0, 0, lambda _
, extra_mounts
=None, ___
=None : '5', lambda : None)
308 @mock.patch('cephadm.logger')
309 @mock.patch('cephadm.fetch_custom_config_files')
310 @mock.patch('cephadm.get_container')
311 def test_get_deployment_container(self
, _get_container
, _get_config
, _logger
):
313 test get_deployment_container properly makes use of extra container args and custom conf files
316 ctx
= _cephadm
.CephadmContext()
317 ctx
.config_json
= '-'
318 ctx
.extra_container_args
= [
319 '--pids-limit=12345',
322 ctx
.data_dir
= 'data'
323 _get_config
.return_value
= [
325 'mount_path': '/etc/testing.str',
326 'content': 'this\nis\na\nstring',
329 _get_container
.return_value
= _cephadm
.CephContainer
.for_daemon(
331 fsid
='9b9d7609-f4d5-4aba-94c8-effa764d96c9',
332 daemon_type
='grafana',
344 c
= _cephadm
.get_deployment_container(ctx
,
345 '9b9d7609-f4d5-4aba-94c8-effa764d96c9',
349 assert '--pids-limit=12345' in c
.container_args
350 assert '--something' in c
.container_args
351 assert os
.path
.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str') in c
.volume_mounts
352 assert c
.volume_mounts
[os
.path
.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str')] == '/etc/testing.str'
354 @mock.patch('cephadm.logger')
355 @mock.patch('cephadm.FileLock')
356 @mock.patch('cephadm.deploy_daemon')
357 @mock.patch('cephadm.fetch_configs')
358 @mock.patch('cephadm.make_var_run')
359 @mock.patch('cephadm.migrate_sysctl_dir')
360 @mock.patch('cephadm.check_unit', lambda *args
, **kwargs
: (None, 'running', None))
361 @mock.patch('cephadm.get_unit_name', lambda *args
, **kwargs
: 'mon-unit-name')
362 @mock.patch('cephadm.get_deployment_container')
363 @mock.patch('cephadm.read_configuration_source', lambda c
: {})
364 @mock.patch('cephadm.apply_deploy_config_to_ctx', lambda d
, c
: None)
365 @mock.patch('cephadm.extract_uid_gid', lambda *args
, **kwargs
: ('ceph', 'ceph'))
366 def test_mon_crush_location(self
, _get_deployment_container
, _migrate_sysctl
, _make_var_run
, _fetch_configs
, _deploy_daemon
, _file_lock
, _logger
):
368 test that crush location for mon is set if it is included in config_json
371 ctx
= _cephadm
.CephadmContext()
372 ctx
.name
= 'mon.test'
373 ctx
.fsid
= '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
375 ctx
.container_engine
= mock_docker()
376 ctx
.allow_ptrace
= True
377 ctx
.config_json
= '-'
379 ctx
.tcp_ports
= '3300 6789'
380 _fetch_configs
.return_value
= {
381 'crush_location': 'database=a'
384 _get_deployment_container
.return_value
= _cephadm
.CephContainer
.for_daemon(
386 fsid
='9b9d7609-f4d5-4aba-94c8-effa764d96c9',
400 def _crush_location_checker(ctx
, fsid
, daemon_type
, daemon_id
, container
, uid
, gid
, **kwargs
):
401 print(container
.args
)
402 raise Exception(' '.join(container
.args
))
404 _deploy_daemon
.side_effect
= _crush_location_checker
406 with pytest
.raises(Exception, match
='--set-crush-location database=a'):
407 _cephadm
.command_deploy_from(ctx
)
409 @mock.patch('cephadm.logger')
410 @mock.patch('cephadm.fetch_custom_config_files')
411 def test_write_custom_conf_files(self
, _get_config
, _logger
, cephadm_fs
):
413 test _write_custom_conf_files writes the conf files correctly
416 ctx
= _cephadm
.CephadmContext()
417 ctx
.config_json
= '-'
418 ctx
.data_dir
= _cephadm
.DATA_DIR
419 _get_config
.return_value
= [
421 'mount_path': '/etc/testing.str',
422 'content': 'this\nis\na\nstring',
425 'mount_path': '/etc/testing.conf',
426 'content': 'very_cool_conf_setting: very_cool_conf_value\nx: y',
429 'mount_path': '/etc/no-content.conf',
432 _cephadm
._write
_custom
_conf
_files
(ctx
, 'mon', 'host1', 'fsid', 0, 0)
433 with
open(os
.path
.join(_cephadm
.DATA_DIR
, 'fsid', 'custom_config_files', 'mon.host1', 'testing.str'), 'r') as f
:
434 assert 'this\nis\na\nstring' == f
.read()
435 with
open(os
.path
.join(_cephadm
.DATA_DIR
, 'fsid', 'custom_config_files', 'mon.host1', 'testing.conf'), 'r') as f
:
436 assert 'very_cool_conf_setting: very_cool_conf_value\nx: y' == f
.read()
437 with pytest
.raises(FileNotFoundError
):
438 open(os
.path
.join(_cephadm
.DATA_DIR
, 'fsid', 'custom_config_files', 'mon.host1', 'no-content.conf'), 'r')
440 @mock.patch('cephadm.call_throws')
441 @mock.patch('cephadm.get_parm')
442 @mock.patch('cephadm.logger')
443 def test_registry_login(self
, _logger
, _get_parm
, _call_throws
):
444 # test normal valid login with url, username and password specified
445 _call_throws
.return_value
= '', '', 0
446 ctx
: _cephadm
.CephadmContext
= _cephadm
.cephadm_init_ctx(
447 ['registry-login', '--registry-url', 'sample-url',
448 '--registry-username', 'sample-user', '--registry-password',
450 ctx
.container_engine
= mock_docker()
451 retval
= _cephadm
.command_registry_login(ctx
)
454 # test bad login attempt with invalid arguments given
455 ctx
: _cephadm
.CephadmContext
= _cephadm
.cephadm_init_ctx(
456 ['registry-login', '--registry-url', 'bad-args-url'])
457 with pytest
.raises(Exception) as e
:
458 assert _cephadm
.command_registry_login(ctx
)
459 assert str(e
.value
) == ('Invalid custom registry arguments received. To login to a custom registry include '
460 '--registry-url, --registry-username and --registry-password options or --registry-json option')
462 # test normal valid login with json file
463 _get_parm
.return_value
= {"url": "sample-url", "username": "sample-username", "password": "sample-password"}
464 ctx
: _cephadm
.CephadmContext
= _cephadm
.cephadm_init_ctx(
465 ['registry-login', '--registry-json', 'sample-json'])
466 ctx
.container_engine
= mock_docker()
467 retval
= _cephadm
.command_registry_login(ctx
)
470 # test bad login attempt with bad json file
471 _get_parm
.return_value
= {"bad-json": "bad-json"}
472 ctx
: _cephadm
.CephadmContext
= _cephadm
.cephadm_init_ctx(
473 ['registry-login', '--registry-json', 'sample-json'])
474 with pytest
.raises(Exception) as e
:
475 assert _cephadm
.command_registry_login(ctx
)
476 assert str(e
.value
) == ("json provided for custom registry login did not include all necessary fields. "
477 "Please setup json file as\n"
479 " \"url\": \"REGISTRY_URL\",\n"
480 " \"username\": \"REGISTRY_USERNAME\",\n"
481 " \"password\": \"REGISTRY_PASSWORD\"\n"
484 # test login attempt with valid arguments where login command fails
485 _call_throws
.side_effect
= Exception
486 ctx
: _cephadm
.CephadmContext
= _cephadm
.cephadm_init_ctx(
487 ['registry-login', '--registry-url', 'sample-url',
488 '--registry-username', 'sample-user', '--registry-password',
490 with pytest
.raises(Exception) as e
:
491 _cephadm
.command_registry_login(ctx
)
492 assert str(e
.value
) == "Failed to login to custom registry @ sample-url as sample-user with given password"
494 def test_get_image_info_from_inspect(self
):
496 out
= """204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1,[docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992]"""
497 r
= _cephadm
.get_image_info_from_inspect(out
, 'registry/ceph/ceph:latest')
500 'image_id': '204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1',
501 'repo_digests': ['docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992']
505 out
= """sha256:16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552,[quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f]"""
506 r
= _cephadm
.get_image_info_from_inspect(out
, 'registry/ceph/ceph:latest')
508 'image_id': '16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552',
509 'repo_digests': ['quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f']
512 # multiple digests (podman)
513 out
= """e935122ab143a64d92ed1fbb27d030cf6e2f0258207be1baf1b509c466aeeb42,[docker.io/prom/prometheus@sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4 docker.io/prom/prometheus@sha256:efd99a6be65885c07c559679a0df4ec709604bcdd8cd83f0d00a1a683b28fb6a]"""
514 r
= _cephadm
.get_image_info_from_inspect(out
, 'registry/prom/prometheus:latest')
516 'image_id': 'e935122ab143a64d92ed1fbb27d030cf6e2f0258207be1baf1b509c466aeeb42',
518 'docker.io/prom/prometheus@sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4',
519 'docker.io/prom/prometheus@sha256:efd99a6be65885c07c559679a0df4ec709604bcdd8cd83f0d00a1a683b28fb6a',
524 def test_dict_get(self
):
525 result
= _cephadm
.dict_get({'a': 1}, 'a', require
=True)
527 result
= _cephadm
.dict_get({'a': 1}, 'b')
528 assert result
is None
529 result
= _cephadm
.dict_get({'a': 1}, 'b', default
=2)
532 def test_dict_get_error(self
):
533 with pytest
.raises(_cephadm
.Error
):
534 _cephadm
.dict_get({'a': 1}, 'b', require
=True)
536 def test_dict_get_join(self
):
537 result
= _cephadm
.dict_get_join({'foo': ['a', 'b']}, 'foo')
538 assert result
== 'a\nb'
539 result
= _cephadm
.dict_get_join({'foo': [1, 2]}, 'foo')
540 assert result
== '1\n2'
541 result
= _cephadm
.dict_get_join({'bar': 'a'}, 'bar')
543 result
= _cephadm
.dict_get_join({'a': 1}, 'a')
546 @mock.patch('os.listdir', return_value
=[])
547 @mock.patch('cephadm.logger')
548 def test_infer_local_ceph_image(self
, _logger
, _listdir
):
549 ctx
= _cephadm
.CephadmContext()
550 ctx
.fsid
= '00000000-0000-0000-0000-0000deadbeez'
551 ctx
.container_engine
= mock_podman()
553 # make sure the right image is selected when container is found
554 cinfo
= _cephadm
.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
555 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
556 '514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d',
557 '2022-04-19 13:45:20.97146228 +0000 UTC',
559 out
= '''quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185|dad864ee21e9|main|2022-03-23 16:29:19 +0000 UTC
560 quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e|514e6a882f6e|pacific|2022-03-23 15:58:34 +0000 UTC
561 docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC'''
562 with mock
.patch('cephadm.call_throws', return_value
=(out
, '', '')):
563 with mock
.patch('cephadm.get_container_info', return_value
=cinfo
):
564 image
= _cephadm
.infer_local_ceph_image(ctx
, ctx
.container_engine
)
565 assert image
== 'quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e'
567 # make sure first valid image is used when no container_info is found
568 out
= '''quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185|dad864ee21e9|main|2022-03-23 16:29:19 +0000 UTC
569 quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e|514e6a882f6e|pacific|2022-03-23 15:58:34 +0000 UTC
570 docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC'''
571 with mock
.patch('cephadm.call_throws', return_value
=(out
, '', '')):
572 with mock
.patch('cephadm.get_container_info', return_value
=None):
573 image
= _cephadm
.infer_local_ceph_image(ctx
, ctx
.container_engine
)
574 assert image
== 'quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185'
576 # make sure images without digest are discarded (no container_info is found)
577 out
= '''quay.ceph.io/ceph-ci/ceph@|||
578 docker.io/ceph/ceph@|||
579 docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC'''
580 with mock
.patch('cephadm.call_throws', return_value
=(out
, '', '')):
581 with mock
.patch('cephadm.get_container_info', return_value
=None):
582 image
= _cephadm
.infer_local_ceph_image(ctx
, ctx
.container_engine
)
583 assert image
== 'docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508'
587 @pytest.mark
.parametrize('daemon_filter, by_name, daemon_list, container_stats, output',
589 # get container info by type ('mon')
594 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
595 {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
597 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
600 _cephadm
.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
601 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
602 '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
603 '2022-04-19 13:45:20.97146228 +0000 UTC',
606 # get container info by name ('mon.ceph-node-0')
611 {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
612 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
614 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
617 _cephadm
.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
618 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
619 '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
620 '2022-04-19 13:45:20.97146228 +0000 UTC',
623 # get container info by name (same daemon but two different fsids)
628 {'name': 'mon.ceph-node-0', 'fsid': '10000000-0000-0000-0000-0000deadbeef'},
629 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
631 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
634 _cephadm
.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
635 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
636 '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
637 '2022-04-19 13:45:20.97146228 +0000 UTC',
640 # get container info by type (bad container stats: 127 code)
645 {'name': 'mon.ceph-node-0', 'fsid': '00000000-FFFF-0000-0000-0000deadbeef'},
646 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
653 # get container info by name (bad container stats: 127 code)
658 {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
659 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
666 # get container info by invalid name (doens't contain '.')
671 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
672 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
674 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
679 # get container info by invalid name (empty)
684 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
685 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
687 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
692 # get container info by invalid type (empty)
697 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
698 {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
700 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
705 # get container info by name: no match (invalid fsid)
710 {'name': 'mon.ceph-node-0', 'fsid': '00000000-1111-0000-0000-0000deadbeef'},
711 {'name': 'mon.ceph-node-0', 'fsid': '00000000-2222-0000-0000-0000deadbeef'},
713 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
718 # get container info by name: no match
726 # get container info by type: no match
735 @mock.patch('cephadm.logger')
736 def test_get_container_info(self
, _logger
, daemon_filter
, by_name
, daemon_list
, container_stats
, output
):
737 ctx
= _cephadm
.CephadmContext()
738 ctx
.fsid
= '00000000-0000-0000-0000-0000deadbeef'
739 ctx
.container_engine
= mock_podman()
740 with mock
.patch('cephadm.list_daemons', return_value
=daemon_list
):
741 with mock
.patch('cephadm.get_container_stats', return_value
=container_stats
):
742 assert _cephadm
.get_container_info(ctx
, daemon_filter
, by_name
) == output
744 def test_should_log_to_journald(self
):
745 ctx
= _cephadm
.CephadmContext()
747 ctx
.log_to_journald
= True
748 assert _cephadm
.should_log_to_journald(ctx
)
750 ctx
.log_to_journald
= None
751 # enable if podman support --cgroup=split
752 ctx
.container_engine
= mock_podman()
753 ctx
.container_engine
.version
= (2, 1, 0)
754 assert _cephadm
.should_log_to_journald(ctx
)
756 # disable on old podman
757 ctx
.container_engine
.version
= (2, 0, 0)
758 assert not _cephadm
.should_log_to_journald(ctx
)
761 ctx
.container_engine
= mock_docker()
762 assert not _cephadm
.should_log_to_journald(ctx
)
764 def test_normalize_image_digest(self
):
765 s
= 'myhostname:5000/ceph/ceph@sha256:753886ad9049004395ae990fbb9b096923b5a518b819283141ee8716ddf55ad1'
766 assert _cephadm
.normalize_image_digest(s
) == s
768 s
= 'ceph/ceph:latest'
769 assert _cephadm
.normalize_image_digest(s
) == f
'{_cephadm.DEFAULT_REGISTRY}/{s}'
771 @pytest.mark
.parametrize('fsid, ceph_conf, list_daemons, result, err, ',
781 '00000000-0000-0000-0000-0000deadbeef',
784 '00000000-0000-0000-0000-0000deadbeef',
788 '00000000-0000-0000-0000-0000deadbeef',
791 {'fsid': '10000000-0000-0000-0000-0000deadbeef'},
792 {'fsid': '20000000-0000-0000-0000-0000deadbeef'},
794 '00000000-0000-0000-0000-0000deadbeef',
801 {'fsid': '00000000-0000-0000-0000-0000deadbeef'},
803 '00000000-0000-0000-0000-0000deadbeef',
810 {'fsid': '10000000-0000-0000-0000-0000deadbeef'},
811 {'fsid': '20000000-0000-0000-0000-0000deadbeef'},
814 r
'Cannot infer an fsid',
818 get_ceph_conf(fsid
='00000000-0000-0000-0000-0000deadbeef'),
820 '00000000-0000-0000-0000-0000deadbeef',
825 get_ceph_conf(fsid
='00000000-0000-0000-0000-0000deadbeef'),
827 {'fsid': '00000000-0000-0000-0000-0000deadbeef'},
829 '00000000-0000-0000-0000-0000deadbeef',
834 get_ceph_conf(fsid
='00000000-0000-0000-0000-0000deadbeef'),
836 {'fsid': '10000000-0000-0000-0000-0000deadbeef'},
837 {'fsid': '20000000-0000-0000-0000-0000deadbeef'},
840 r
'Cannot infer an fsid',
843 @mock.patch('cephadm.call')
844 @mock.patch('cephadm.logger')
845 def test_infer_fsid(self
, _logger
, _call
, fsid
, ceph_conf
, list_daemons
, result
, err
, cephadm_fs
):
847 ctx
= _cephadm
.CephadmContext()
851 mock_fn
= mock
.Mock()
852 mock_fn
.return_value
= 0
853 infer_fsid
= _cephadm
.infer_fsid(mock_fn
)
855 # mock the ceph.conf file content
857 f
= cephadm_fs
.create_file('ceph.conf', contents
=ceph_conf
)
861 with mock
.patch('cephadm.list_daemons', return_value
=list_daemons
):
863 with pytest
.raises(_cephadm
.Error
, match
=err
):
867 assert ctx
.fsid
== result
869 @pytest.mark
.parametrize('fsid, other_conf_files, config, name, list_daemons, result, ',
871 # per cluster conf has more precedence than default conf
873 '00000000-0000-0000-0000-0000deadbeef',
874 [_cephadm
.CEPH_DEFAULT_CONF
],
878 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
880 # mon daemon conf has more precedence than cluster conf and default conf
882 '00000000-0000-0000-0000-0000deadbeef',
883 ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
884 _cephadm
.CEPH_DEFAULT_CONF
],
887 [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}],
888 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
890 # daemon conf (--name option) has more precedence than cluster, default and mon conf
892 '00000000-0000-0000-0000-0000deadbeef',
893 ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
894 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
895 _cephadm
.CEPH_DEFAULT_CONF
],
898 [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'},
899 {'name': 'osd.0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}],
900 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/osd.0/config',
902 # user provided conf ('/foo/ceph.conf') more precedence than any other conf
904 '00000000-0000-0000-0000-0000deadbeef',
905 ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
906 _cephadm
.CEPH_DEFAULT_CONF
,
907 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config'],
910 [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}],
914 @mock.patch('cephadm.call')
915 @mock.patch('cephadm.logger')
916 def test_infer_config_precedence(self
, _logger
, _call
, other_conf_files
, fsid
, config
, name
, list_daemons
, result
, cephadm_fs
):
918 ctx
= _cephadm
.CephadmContext()
924 mock_fn
= mock
.Mock()
925 mock_fn
.return_value
= 0
926 infer_config
= _cephadm
.infer_config(mock_fn
)
928 # mock the config file
929 cephadm_fs
.create_file(result
)
931 # mock other potential config files
932 for f
in other_conf_files
:
933 cephadm_fs
.create_file(f
)
936 with mock
.patch('cephadm.list_daemons', return_value
=list_daemons
):
938 assert ctx
.config
== result
940 @pytest.mark
.parametrize('fsid, config, name, list_daemons, result, ',
950 '00000000-0000-0000-0000-0000deadbeef',
954 _cephadm
.CEPH_DEFAULT_CONF
,
957 '00000000-0000-0000-0000-0000deadbeef',
961 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
964 '00000000-0000-0000-0000-0000deadbeef',
967 [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}],
968 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
971 '00000000-0000-0000-0000-0000deadbeef',
974 [{'name': 'mon.a', 'fsid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'style': 'cephadm:v1'}],
975 _cephadm
.CEPH_DEFAULT_CONF
,
978 '00000000-0000-0000-0000-0000deadbeef',
981 [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'legacy'}],
982 _cephadm
.CEPH_DEFAULT_CONF
,
985 '00000000-0000-0000-0000-0000deadbeef',
989 _cephadm
.CEPH_DEFAULT_CONF
,
992 '00000000-0000-0000-0000-0000deadbeef',
995 [{'name': 'mon.a', 'style': 'cephadm:v1'}],
999 '00000000-0000-0000-0000-0000deadbeef',
1003 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
1006 '00000000-0000-0000-0000-0000deadbeef',
1010 '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/osd.0/config',
1017 _cephadm
.CEPH_DEFAULT_CONF
,
1020 @mock.patch('cephadm.call')
1021 @mock.patch('cephadm.logger')
1022 def test_infer_config(self
, _logger
, _call
, fsid
, config
, name
, list_daemons
, result
, cephadm_fs
):
1024 ctx
= _cephadm
.CephadmContext()
1029 # mock the decorator
1030 mock_fn
= mock
.Mock()
1031 mock_fn
.return_value
= 0
1032 infer_config
= _cephadm
.infer_config(mock_fn
)
1034 # mock the config file
1035 cephadm_fs
.create_file(result
)
1038 with mock
.patch('cephadm.list_daemons', return_value
=list_daemons
):
1040 assert ctx
.config
== result
1042 @mock.patch('cephadm.call')
1043 def test_extract_uid_gid_fail(self
, _call
):
1044 err
= """Error: container_linux.go:370: starting container process caused: process_linux.go:459: container init caused: process_linux.go:422: setting cgroup config for procHooks process caused: Unit libpod-056038e1126191fba41d8a037275136f2d7aeec9710b9ee
1045 ff792c06d8544b983.scope not found.: OCI runtime error"""
1046 _call
.return_value
= ('', err
, 127)
1047 ctx
= _cephadm
.CephadmContext()
1048 ctx
.container_engine
= mock_podman()
1049 with pytest
.raises(_cephadm
.Error
, match
='OCI'):
1050 _cephadm
.extract_uid_gid(ctx
)
1052 @pytest.mark
.parametrize('test_input, expected', [
1053 ([_cephadm
.make_fsid(), _cephadm
.make_fsid(), _cephadm
.make_fsid()], 3),
1054 ([_cephadm
.make_fsid(), 'invalid-fsid', _cephadm
.make_fsid(), '0b87e50c-8e77-11ec-b890-'], 2),
1055 (['f6860ec2-8e76-11ec-', '0b87e50c-8e77-11ec-b890-', ''], 0),
1058 def test_get_ceph_cluster_count(self
, test_input
, expected
):
1059 ctx
= _cephadm
.CephadmContext()
1060 with mock
.patch('os.listdir', return_value
=test_input
):
1061 assert _cephadm
.get_ceph_cluster_count(ctx
) == expected
1063 def test_set_image_minimize_config(self
):
1065 raise _cephadm
.Error(' '.join(cmd
))
1066 ctx
= _cephadm
.CephadmContext()
1067 ctx
.image
= 'test_image'
1068 ctx
.no_minimize_config
= True
1069 fake_cli
= lambda cmd
, __
=None, ___
=None: throw_cmd(cmd
)
1070 with pytest
.raises(_cephadm
.Error
, match
='config set global container_image test_image'):
1071 _cephadm
.finish_bootstrap_config(
1073 fsid
=_cephadm
.make_fsid(),
1075 mon_id
='a', mon_dir
='mon_dir',
1076 mon_network
=None, ipv6
=False,
1078 cluster_network
=None,
1079 ipv6_cluster_network
=False
1083 class TestCustomContainer(unittest
.TestCase
):
1084 cc
: _cephadm
.CustomContainer
1087 self
.cc
= _cephadm
.CustomContainer(
1088 'e863154d-33c7-4350-bca5-921e0467e55b',
1091 'entrypoint': 'bash',
1097 'envs': ['SECRET=password'],
1098 'ports': [8080, 8443],
1100 '/CONFIG_DIR': '/foo/conf',
1101 'bar/config': '/bar:ro'
1106 'source=/CONFIG_DIR',
1107 'destination=/foo/conf',
1112 'source=bar/config',
1113 'destination=/bar:ro',
1118 image
='docker.io/library/hello-world:latest'
1121 def test_entrypoint(self
):
1122 self
.assertEqual(self
.cc
.entrypoint
, 'bash')
1124 def test_uid_gid(self
):
1125 self
.assertEqual(self
.cc
.uid
, 65534)
1126 self
.assertEqual(self
.cc
.gid
, 1000)
1128 def test_ports(self
):
1129 self
.assertEqual(self
.cc
.ports
, [8080, 8443])
1131 def test_get_container_args(self
):
1132 result
= self
.cc
.get_container_args()
1133 self
.assertEqual(result
, [
1138 def test_get_container_envs(self
):
1139 result
= self
.cc
.get_container_envs()
1140 self
.assertEqual(result
, ['SECRET=password'])
1142 def test_get_container_mounts(self
):
1143 result
= self
.cc
.get_container_mounts('/xyz')
1144 self
.assertDictEqual(result
, {
1145 '/CONFIG_DIR': '/foo/conf',
1146 '/xyz/bar/config': '/bar:ro'
1149 def test_get_container_binds(self
):
1150 result
= self
.cc
.get_container_binds('/xyz')
1151 self
.assertEqual(result
, [
1154 'source=/CONFIG_DIR',
1155 'destination=/foo/conf',
1160 'source=/xyz/bar/config',
1161 'destination=/bar:ro',
class TestMaintenance:
    systemd_target = "ceph.00000000-0000-0000-0000-000000c0ffee.target"
    fsid = '0ea8cdd0-1bbf-11ec-a9c7-5254002763fa'

    def test_systemd_target_OK(self, tmp_path):
        # A wants-file under <unit_dir>/ceph.target.wants marks the target
        # as enabled for this host.
        base = tmp_path
        wants = base / "ceph.target.wants"
        wants.mkdir()
        target = wants / TestMaintenance.systemd_target
        target.touch()
        ctx = _cephadm.CephadmContext()
        ctx.unit_dir = str(base)

        assert _cephadm.systemd_target_state(ctx, target.name)

    def test_systemd_target_NOTOK(self, tmp_path):
        # With no wants-file present the state must report False.
        base = tmp_path
        ctx = _cephadm.CephadmContext()
        ctx.unit_dir = str(base)
        assert not _cephadm.systemd_target_state(ctx, TestMaintenance.systemd_target)

    def test_parser_OK(self):
        args = _cephadm._parse_args(['host-maintenance', 'enter'])
        assert args.maintenance_action == 'enter'

    def test_parser_BAD(self):
        # An unknown maintenance action is rejected by argparse.
        with pytest.raises(SystemExit):
            _cephadm._parse_args(['host-maintenance', 'wah'])

    @mock.patch('os.listdir', return_value=[])
    @mock.patch('cephadm.call')
    @mock.patch('cephadm.logger')
    @mock.patch('cephadm.systemd_target_state')
    def test_enter_failure_1(self, _target_state, _logger, _call, _listdir):
        # systemctl failing outright (rc=999) must produce a 'failed...' result.
        _call.return_value = '', '', 999
        _target_state.return_value = True
        ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
            ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid])
        ctx.container_engine = mock_podman()
        retval = _cephadm.command_maintenance(ctx)
        assert retval.startswith('failed')

    @mock.patch('os.listdir', return_value=[])
    @mock.patch('cephadm.call')
    @mock.patch('cephadm.logger')
    @mock.patch('cephadm.systemd_target_state')
    def test_enter_failure_2(self, _target_state, _logger, _call, _listdir):
        # A later systemctl step failing must also report failure.
        _call.side_effect = [('', '', 0), ('', '', 999), ('', '', 0), ('', '', 999)]
        _target_state.return_value = True
        ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
            ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid])
        ctx.container_engine = mock_podman()
        retval = _cephadm.command_maintenance(ctx)
        assert retval.startswith('failed')

    @mock.patch('os.listdir', return_value=[])
    @mock.patch('cephadm.call')
    @mock.patch('cephadm.logger')
    @mock.patch('cephadm.systemd_target_state')
    @mock.patch('cephadm.target_exists')
    def test_exit_failure_1(self, _target_exists, _target_state, _logger, _call, _listdir):
        # Exiting maintenance with systemctl failing immediately.
        _call.return_value = '', '', 999
        _target_state.return_value = False
        _target_exists.return_value = True
        ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
            ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid])
        ctx.container_engine = mock_podman()
        retval = _cephadm.command_maintenance(ctx)
        assert retval.startswith('failed')

    @mock.patch('os.listdir', return_value=[])
    @mock.patch('cephadm.call')
    @mock.patch('cephadm.logger')
    @mock.patch('cephadm.systemd_target_state')
    @mock.patch('cephadm.target_exists')
    def test_exit_failure_2(self, _target_exists, _target_state, _logger, _call, _listdir):
        # Exiting maintenance with a later systemctl step failing.
        _call.side_effect = [('', '', 0), ('', '', 999), ('', '', 0), ('', '', 999)]
        _target_state.return_value = False
        _target_exists.return_value = True
        ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
            ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid])
        ctx.container_engine = mock_podman()
        retval = _cephadm.command_maintenance(ctx)
        assert retval.startswith('failed')
1253 class TestMonitoring(object):
1254 @mock.patch('cephadm.call')
1255 def test_get_version_alertmanager(self
, _call
):
1256 ctx
= _cephadm
.CephadmContext()
1257 ctx
.container_engine
= mock_podman()
1258 daemon_type
= 'alertmanager'
1260 # binary `prometheus`
1261 _call
.return_value
= '', '{}, version 0.16.1'.format(daemon_type
), 0
1262 version
= _cephadm
.Monitoring
.get_version(ctx
, 'container_id', daemon_type
)
1263 assert version
== '0.16.1'
1265 # binary `prometheus-alertmanager`
1266 _call
.side_effect
= (
1268 ('', '{}, version 0.16.1'.format(daemon_type
), 0),
1270 version
= _cephadm
.Monitoring
.get_version(ctx
, 'container_id', daemon_type
)
1271 assert version
== '0.16.1'
1273 @mock.patch('cephadm.call')
1274 def test_get_version_prometheus(self
, _call
):
1275 ctx
= _cephadm
.CephadmContext()
1276 ctx
.container_engine
= mock_podman()
1277 daemon_type
= 'prometheus'
1278 _call
.return_value
= '', '{}, version 0.16.1'.format(daemon_type
), 0
1279 version
= _cephadm
.Monitoring
.get_version(ctx
, 'container_id', daemon_type
)
1280 assert version
== '0.16.1'
1282 def test_prometheus_external_url(self
):
1283 ctx
= _cephadm
.CephadmContext()
1284 ctx
.config_json
= json
.dumps({'files': {}, 'retention_time': '15d'})
1285 daemon_type
= 'prometheus'
1287 fsid
= 'aaf5a720-13fe-4a3b-82b9-2d99b7fd9704'
1288 args
= _cephadm
.get_daemon_args(ctx
, fsid
, daemon_type
, daemon_id
)
1289 assert any([x
.startswith('--web.external-url=http://') for x
in args
])
1291 @mock.patch('cephadm.call')
1292 def test_get_version_node_exporter(self
, _call
):
1293 ctx
= _cephadm
.CephadmContext()
1294 ctx
.container_engine
= mock_podman()
1295 daemon_type
= 'node-exporter'
1296 _call
.return_value
= '', '{}, version 0.16.1'.format(daemon_type
.replace('-', '_')), 0
1297 version
= _cephadm
.Monitoring
.get_version(ctx
, 'container_id', daemon_type
)
1298 assert version
== '0.16.1'
1300 def test_create_daemon_dirs_prometheus(self
, cephadm_fs
):
1302 Ensures the required and optional files given in the configuration are
1303 created and mapped correctly inside the container. Tests absolute and
1304 relative file paths given in the configuration.
1307 fsid
= 'aaf5a720-13fe-4a3b-82b9-2d99b7fd9704'
1308 daemon_type
= 'prometheus'
1311 ctx
= _cephadm
.CephadmContext()
1312 ctx
.data_dir
= '/somedir'
1313 ctx
.config_json
= json
.dumps({
1315 'prometheus.yml': 'foo',
1316 '/etc/prometheus/alerting/ceph_alerts.yml': 'bar'
1320 _cephadm
.create_daemon_dirs(ctx
,
1329 prefix
= '{data_dir}/{fsid}/{daemon_type}.{daemon_id}'.format(
1330 data_dir
=ctx
.data_dir
,
1332 daemon_type
=daemon_type
,
1337 'etc/prometheus/prometheus.yml': 'foo',
1338 'etc/prometheus/alerting/ceph_alerts.yml': 'bar',
1341 for file,content
in expected
.items():
1342 file = os
.path
.join(prefix
, file)
1343 assert os
.path
.exists(file)
1344 with
open(file) as f
:
1345 assert f
.read() == content
1347 # assert uid/gid after redeploy
1350 _cephadm
.create_daemon_dirs(ctx
,
1358 for file,content
in expected
.items():
1359 file = os
.path
.join(prefix
, file)
1360 assert os
.stat(file).st_uid
== new_uid
1361 assert os
.stat(file).st_gid
== new_gid
1364 class TestBootstrap(object):
1367 def _get_cmd(*args
):
1370 '--allow-mismatched-release',
1371 '--skip-prepare-host',
1377 ###############################################3
1379 def test_config(self
, cephadm_fs
):
1381 cmd
= self
._get
_cmd
(
1382 '--mon-ip', '192.168.1.1',
1383 '--skip-mon-network',
1384 '--config', conf_file
,
1387 with
with_cephadm_ctx(cmd
) as ctx
:
1388 msg
= r
'No such file or directory'
1389 with pytest
.raises(_cephadm
.Error
, match
=msg
):
1390 _cephadm
.command_bootstrap(ctx
)
1392 cephadm_fs
.create_file(conf_file
)
1393 with
with_cephadm_ctx(cmd
) as ctx
:
1394 retval
= _cephadm
.command_bootstrap(ctx
)
1397 def test_no_mon_addr(self
, cephadm_fs
):
1398 cmd
= self
._get
_cmd
()
1399 with
with_cephadm_ctx(cmd
) as ctx
:
1400 msg
= r
'must specify --mon-ip or --mon-addrv'
1401 with pytest
.raises(_cephadm
.Error
, match
=msg
):
1402 _cephadm
.command_bootstrap(ctx
)
1404 def test_skip_mon_network(self
, cephadm_fs
):
1405 cmd
= self
._get
_cmd
('--mon-ip', '192.168.1.1')
1407 with
with_cephadm_ctx(cmd
, list_networks
={}) as ctx
:
1408 msg
= r
'--skip-mon-network'
1409 with pytest
.raises(_cephadm
.Error
, match
=msg
):
1410 _cephadm
.command_bootstrap(ctx
)
1412 cmd
+= ['--skip-mon-network']
1413 with
with_cephadm_ctx(cmd
, list_networks
={}) as ctx
:
1414 retval
= _cephadm
.command_bootstrap(ctx
)
1417 @pytest.mark
.parametrize('mon_ip, list_networks, result',
1422 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1427 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1432 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1437 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1442 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1447 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1453 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1457 '::ffff:192.168.1.0',
1458 {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
1462 '::ffff:192.168.1.1',
1463 {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
1468 {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
1472 '[::ffff:c0a8:101]:1234',
1473 {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
1477 '[::ffff:c0a8:101]:0123',
1478 {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
1482 '0000:0000:0000:0000:0000:FFFF:C0A8:0101',
1483 {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
1487 def test_mon_ip(self
, mon_ip
, list_networks
, result
, cephadm_fs
):
1488 cmd
= self
._get
_cmd
('--mon-ip', mon_ip
)
1490 with
with_cephadm_ctx(cmd
, list_networks
=list_networks
) as ctx
:
1491 msg
= r
'--skip-mon-network'
1492 with pytest
.raises(_cephadm
.Error
, match
=msg
):
1493 _cephadm
.command_bootstrap(ctx
)
1495 with
with_cephadm_ctx(cmd
, list_networks
=list_networks
) as ctx
:
1496 retval
= _cephadm
.command_bootstrap(ctx
)
1499 @pytest.mark
.parametrize('mon_addrv, list_networks, err',
1504 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1505 r
'must use square brackets',
1509 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1510 r
'must include port number',
1513 '[192.168.1.1:1234]',
1514 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1518 '[192.168.1.1:0123]',
1519 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1523 '[v2:192.168.1.1:3300,v1:192.168.1.1:6789]',
1524 {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
1529 '[::ffff:192.168.1.1:1234]',
1530 {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
1534 '[::ffff:192.168.1.1:0123]',
1535 {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
1539 '[0000:0000:0000:0000:0000:FFFF:C0A8:0101:1234]',
1540 {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
1544 '[v2:0000:0000:0000:0000:0000:FFFF:C0A8:0101:3300,v1:0000:0000:0000:0000:0000:FFFF:C0A8:0101:6789]',
1545 {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
1549 def test_mon_addrv(self
, mon_addrv
, list_networks
, err
, cephadm_fs
):
1550 cmd
= self
._get
_cmd
('--mon-addrv', mon_addrv
)
1552 with
with_cephadm_ctx(cmd
, list_networks
=list_networks
) as ctx
:
1553 with pytest
.raises(_cephadm
.Error
, match
=err
):
1554 _cephadm
.command_bootstrap(ctx
)
1556 with
with_cephadm_ctx(cmd
, list_networks
=list_networks
) as ctx
:
1557 retval
= _cephadm
.command_bootstrap(ctx
)
1560 def test_allow_fqdn_hostname(self
, cephadm_fs
):
1561 hostname
= 'foo.bar'
1562 cmd
= self
._get
_cmd
(
1563 '--mon-ip', '192.168.1.1',
1564 '--skip-mon-network',
1567 with
with_cephadm_ctx(cmd
, hostname
=hostname
) as ctx
:
1568 msg
= r
'--allow-fqdn-hostname'
1569 with pytest
.raises(_cephadm
.Error
, match
=msg
):
1570 _cephadm
.command_bootstrap(ctx
)
1572 cmd
+= ['--allow-fqdn-hostname']
1573 with
with_cephadm_ctx(cmd
, hostname
=hostname
) as ctx
:
1574 retval
= _cephadm
.command_bootstrap(ctx
)
1577 @pytest.mark
.parametrize('fsid, err',
1580 ('00000000-0000-0000-0000-0000deadbeef', None),
1581 ('00000000-0000-0000-0000-0000deadbeez', 'not an fsid'),
1583 def test_fsid(self
, fsid
, err
, cephadm_fs
):
1584 cmd
= self
._get
_cmd
(
1585 '--mon-ip', '192.168.1.1',
1586 '--skip-mon-network',
1590 with
with_cephadm_ctx(cmd
) as ctx
:
1592 with pytest
.raises(_cephadm
.Error
, match
=err
):
1593 _cephadm
.command_bootstrap(ctx
)
1595 retval
= _cephadm
.command_bootstrap(ctx
)
1599 class TestShell(object):
1601 def test_fsid(self
, cephadm_fs
):
1602 fsid
= '00000000-0000-0000-0000-0000deadbeef'
1604 cmd
= ['shell', '--fsid', fsid
]
1605 with
with_cephadm_ctx(cmd
) as ctx
:
1606 retval
= _cephadm
.command_shell(ctx
)
1608 assert ctx
.fsid
== fsid
1610 cmd
= ['shell', '--fsid', '00000000-0000-0000-0000-0000deadbeez']
1611 with
with_cephadm_ctx(cmd
) as ctx
:
1613 with pytest
.raises(_cephadm
.Error
, match
=err
):
1614 retval
= _cephadm
.command_shell(ctx
)
1616 assert ctx
.fsid
== None
1618 s
= get_ceph_conf(fsid
=fsid
)
1619 f
= cephadm_fs
.create_file('ceph.conf', contents
=s
)
1621 cmd
= ['shell', '--fsid', fsid
, '--config', f
.path
]
1622 with
with_cephadm_ctx(cmd
) as ctx
:
1623 retval
= _cephadm
.command_shell(ctx
)
1625 assert ctx
.fsid
== fsid
1627 cmd
= ['shell', '--fsid', '10000000-0000-0000-0000-0000deadbeef', '--config', f
.path
]
1628 with
with_cephadm_ctx(cmd
) as ctx
:
1629 err
= 'fsid does not match ceph.conf'
1630 with pytest
.raises(_cephadm
.Error
, match
=err
):
1631 retval
= _cephadm
.command_shell(ctx
)
1633 assert ctx
.fsid
== None
1635 def test_name(self
, cephadm_fs
):
1636 cmd
= ['shell', '--name', 'foo']
1637 with
with_cephadm_ctx(cmd
) as ctx
:
1638 retval
= _cephadm
.command_shell(ctx
)
1641 cmd
= ['shell', '--name', 'foo.bar']
1642 with
with_cephadm_ctx(cmd
) as ctx
:
1643 err
= r
'must pass --fsid'
1644 with pytest
.raises(_cephadm
.Error
, match
=err
):
1645 retval
= _cephadm
.command_shell(ctx
)
1648 fsid
= '00000000-0000-0000-0000-0000deadbeef'
1649 cmd
= ['shell', '--name', 'foo.bar', '--fsid', fsid
]
1650 with
with_cephadm_ctx(cmd
) as ctx
:
1651 retval
= _cephadm
.command_shell(ctx
)
1654 def test_config(self
, cephadm_fs
):
1656 with
with_cephadm_ctx(cmd
) as ctx
:
1657 retval
= _cephadm
.command_shell(ctx
)
1659 assert ctx
.config
== None
1661 cephadm_fs
.create_file(_cephadm
.CEPH_DEFAULT_CONF
)
1662 with
with_cephadm_ctx(cmd
) as ctx
:
1663 retval
= _cephadm
.command_shell(ctx
)
1665 assert ctx
.config
== _cephadm
.CEPH_DEFAULT_CONF
1667 cmd
= ['shell', '--config', 'foo']
1668 with
with_cephadm_ctx(cmd
) as ctx
:
1669 retval
= _cephadm
.command_shell(ctx
)
1671 assert ctx
.config
== 'foo'
1673 def test_keyring(self
, cephadm_fs
):
1675 with
with_cephadm_ctx(cmd
) as ctx
:
1676 retval
= _cephadm
.command_shell(ctx
)
1678 assert ctx
.keyring
== None
1680 cephadm_fs
.create_file(_cephadm
.CEPH_DEFAULT_KEYRING
)
1681 with
with_cephadm_ctx(cmd
) as ctx
:
1682 retval
= _cephadm
.command_shell(ctx
)
1684 assert ctx
.keyring
== _cephadm
.CEPH_DEFAULT_KEYRING
1686 cmd
= ['shell', '--keyring', 'foo']
1687 with
with_cephadm_ctx(cmd
) as ctx
:
1688 retval
= _cephadm
.command_shell(ctx
)
1690 assert ctx
.keyring
== 'foo'
1692 @mock.patch('cephadm.CephContainer')
1693 def test_mount_no_dst(self
, _ceph_container
, cephadm_fs
):
1694 cmd
= ['shell', '--mount', '/etc/foo']
1695 with
with_cephadm_ctx(cmd
) as ctx
:
1696 retval
= _cephadm
.command_shell(ctx
)
1698 assert _ceph_container
.call_args
.kwargs
['volume_mounts']['/etc/foo'] == '/mnt/foo'
1700 @mock.patch('cephadm.CephContainer')
1701 def test_mount_with_dst_no_opt(self
, _ceph_container
, cephadm_fs
):
1702 cmd
= ['shell', '--mount', '/etc/foo:/opt/foo/bar']
1703 with
with_cephadm_ctx(cmd
) as ctx
:
1704 retval
= _cephadm
.command_shell(ctx
)
1706 assert _ceph_container
.call_args
.kwargs
['volume_mounts']['/etc/foo'] == '/opt/foo/bar'
1708 @mock.patch('cephadm.CephContainer')
1709 def test_mount_with_dst_and_opt(self
, _ceph_container
, cephadm_fs
):
1710 cmd
= ['shell', '--mount', '/etc/foo:/opt/foo/bar:Z']
1711 with
with_cephadm_ctx(cmd
) as ctx
:
1712 retval
= _cephadm
.command_shell(ctx
)
1714 assert _ceph_container
.call_args
.kwargs
['volume_mounts']['/etc/foo'] == '/opt/foo/bar:Z'
1716 class TestCephVolume(object):
1719 def _get_cmd(*args
):
1723 '--', 'inventory', '--format', 'json'
1726 def test_noop(self
, cephadm_fs
):
1727 cmd
= self
._get
_cmd
()
1728 with
with_cephadm_ctx(cmd
) as ctx
:
1729 _cephadm
.command_ceph_volume(ctx
)
1730 assert ctx
.fsid
== None
1731 assert ctx
.config
== None
1732 assert ctx
.keyring
== None
1733 assert ctx
.config_json
== None
1735 def test_fsid(self
, cephadm_fs
):
1736 fsid
= '00000000-0000-0000-0000-0000deadbeef'
1738 cmd
= self
._get
_cmd
('--fsid', fsid
)
1739 with
with_cephadm_ctx(cmd
) as ctx
:
1740 _cephadm
.command_ceph_volume(ctx
)
1741 assert ctx
.fsid
== fsid
1743 cmd
= self
._get
_cmd
('--fsid', '00000000-0000-0000-0000-0000deadbeez')
1744 with
with_cephadm_ctx(cmd
) as ctx
:
1746 with pytest
.raises(_cephadm
.Error
, match
=err
):
1747 retval
= _cephadm
.command_shell(ctx
)
1749 assert ctx
.fsid
== None
1751 s
= get_ceph_conf(fsid
=fsid
)
1752 f
= cephadm_fs
.create_file('ceph.conf', contents
=s
)
1754 cmd
= self
._get
_cmd
('--fsid', fsid
, '--config', f
.path
)
1755 with
with_cephadm_ctx(cmd
) as ctx
:
1756 _cephadm
.command_ceph_volume(ctx
)
1757 assert ctx
.fsid
== fsid
1759 cmd
= self
._get
_cmd
('--fsid', '10000000-0000-0000-0000-0000deadbeef', '--config', f
.path
)
1760 with
with_cephadm_ctx(cmd
) as ctx
:
1761 err
= 'fsid does not match ceph.conf'
1762 with pytest
.raises(_cephadm
.Error
, match
=err
):
1763 _cephadm
.command_ceph_volume(ctx
)
1764 assert ctx
.fsid
== None
1766 def test_config(self
, cephadm_fs
):
1767 cmd
= self
._get
_cmd
('--config', 'foo')
1768 with
with_cephadm_ctx(cmd
) as ctx
:
1769 err
= r
'No such file or directory'
1770 with pytest
.raises(_cephadm
.Error
, match
=err
):
1771 _cephadm
.command_ceph_volume(ctx
)
1773 cephadm_fs
.create_file('bar')
1774 cmd
= self
._get
_cmd
('--config', 'bar')
1775 with
with_cephadm_ctx(cmd
) as ctx
:
1776 _cephadm
.command_ceph_volume(ctx
)
1777 assert ctx
.config
== 'bar'
1779 def test_keyring(self
, cephadm_fs
):
1780 cmd
= self
._get
_cmd
('--keyring', 'foo')
1781 with
with_cephadm_ctx(cmd
) as ctx
:
1782 err
= r
'No such file or directory'
1783 with pytest
.raises(_cephadm
.Error
, match
=err
):
1784 _cephadm
.command_ceph_volume(ctx
)
1786 cephadm_fs
.create_file('bar')
1787 cmd
= self
._get
_cmd
('--keyring', 'bar')
1788 with
with_cephadm_ctx(cmd
) as ctx
:
1789 _cephadm
.command_ceph_volume(ctx
)
1790 assert ctx
.keyring
== 'bar'
1794 def test_unit_run(self
, cephadm_fs
):
1795 fsid
= '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
1797 'files': {'iscsi-gateway.cfg': ''}
1799 with
with_cephadm_ctx(['--image=ceph/ceph'], list_networks
={}) as ctx
:
1801 ctx
.container_engine
= mock_docker()
1802 ctx
.config_json
= json
.dumps(config_json
)
1804 _cephadm
.get_parm
.return_value
= config_json
1805 c
= _cephadm
.get_container(ctx
, fsid
, 'iscsi', 'daemon_id')
1807 _cephadm
.make_data_dir(ctx
, fsid
, 'iscsi', 'daemon_id')
1808 _cephadm
.deploy_daemon_units(
1818 with
open('/var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/unit.run') as f
:
1819 assert f
.read() == """set -e
1820 if ! grep -qs /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs /proc/mounts; then mount -t configfs none /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs; fi
1821 # iscsi tcmu-runner container
1822 ! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id-tcmu 2> /dev/null
1823 ! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu 2> /dev/null
1824 /usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/local/scripts/tcmu-runner-entrypoint.sh --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/tcmu-runner-entrypoint.sh:/usr/local/scripts/tcmu-runner-entrypoint.sh -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph &
1826 ! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id 2> /dev/null
1827 ! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id 2> /dev/null
1828 /usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/bin/rbd-target-api --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/tcmu-runner-entrypoint.sh:/usr/local/scripts/tcmu-runner-entrypoint.sh -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph
1831 def test_get_container(self
):
1833 Due to a combination of socket.getfqdn() and podman's behavior to
1834 add the container name into the /etc/hosts file, we cannot use periods
1835 in container names. But we need to be able to detect old existing containers.
1836 Assert this behaviour. I think we can remove this in Ceph R
1838 fsid
= '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
1839 with
with_cephadm_ctx(['--image=ceph/ceph'], list_networks
={}) as ctx
:
1841 c
= _cephadm
.get_container(ctx
, fsid
, 'iscsi', 'something')
1842 assert c
.cname
== 'ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-something'
1843 assert c
.old_cname
== 'ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.something'
class TestCheckHost:

    @mock.patch('cephadm.find_executable', return_value='foo')
    @mock.patch('cephadm.check_time_sync', return_value=True)
    @mock.patch('cephadm.logger')
    def test_container_engine(self, _logger, _find_executable, _check_time_sync):
        # check-host requires some container engine to be present.
        ctx = _cephadm.CephadmContext()

        ctx.container_engine = None
        err = r'No container engine binary found'
        with pytest.raises(_cephadm.Error, match=err):
            _cephadm.command_check_host(ctx)

        # either podman or docker satisfies the check
        ctx.container_engine = mock_podman()
        _cephadm.command_check_host(ctx)

        ctx.container_engine = mock_docker()
        _cephadm.command_check_host(ctx)
1868 @pytest.mark
.parametrize('os_release',
1873 VERSION="20.04 LTS (Focal Fossa)"
1876 PRETTY_NAME="Ubuntu 20.04 LTS"
1878 HOME_URL="https://www.ubuntu.com/"
1879 SUPPORT_URL="https://help.ubuntu.com/"
1880 BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
1881 PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
1882 VERSION_CODENAME=focal
1883 UBUNTU_CODENAME=focal
1891 ID_LIKE="rhel fedora"
1893 PLATFORM_ID="platform:el8"
1894 PRETTY_NAME="CentOS Linux 8 (Core)"
1896 CPE_NAME="cpe:/o:centos:centos:8"
1897 HOME_URL="https://www.centos.org/"
1898 BUG_REPORT_URL="https://bugs.centos.org/"
1900 CENTOS_MANTISBT_PROJECT="CentOS-8"
1901 CENTOS_MANTISBT_PROJECT_VERSION="8"
1902 REDHAT_SUPPORT_PRODUCT="centos"
1903 REDHAT_SUPPORT_PRODUCT_VERSION="8"
1908 NAME="openSUSE Tumbleweed"
1909 # VERSION="20210810"
1910 ID="opensuse-tumbleweed"
1911 ID_LIKE="opensuse suse"
1912 VERSION_ID="20210810"
1913 PRETTY_NAME="openSUSE Tumbleweed"
1915 CPE_NAME="cpe:/o:opensuse:tumbleweed:20210810"
1916 BUG_REPORT_URL="https://bugs.opensuse.org"
1917 HOME_URL="https://www.opensuse.org/"
1918 DOCUMENTATION_URL="https://en.opensuse.org/Portal:Tumbleweed"
1919 LOGO="distributor-logo"
1922 @mock.patch('cephadm.find_executable', return_value
='foo')
1923 def test_container_engine(self
, _find_executable
, os_release
, cephadm_fs
):
1924 cephadm_fs
.create_file('/etc/os-release', contents
=os_release
)
1925 ctx
= _cephadm
.CephadmContext()
1927 ctx
.container_engine
= None
1928 _cephadm
.command_rm_repo(ctx
)
1930 ctx
.container_engine
= mock_podman()
1931 _cephadm
.command_rm_repo(ctx
)
1933 ctx
.container_engine
= mock_docker()
1934 _cephadm
.command_rm_repo(ctx
)
1937 class TestValidateRepo
:
1939 @pytest.mark
.parametrize('values',
1946 os_release
=dedent("""
1948 VERSION="20.04 LTS (Focal Fossa)"
1951 PRETTY_NAME="Ubuntu 20.04 LTS"
1953 HOME_URL="https://www.ubuntu.com/"
1954 SUPPORT_URL="https://help.ubuntu.com/"
1955 BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
1956 PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
1957 VERSION_CODENAME=focal
1958 UBUNTU_CODENAME=focal
1961 # YumDnf on Centos8 - OK
1966 os_release
=dedent("""
1970 ID_LIKE="rhel fedora"
1972 PLATFORM_ID="platform:el8"
1973 PRETTY_NAME="CentOS Linux 8 (Core)"
1975 CPE_NAME="cpe:/o:centos:centos:8"
1976 HOME_URL="https://www.centos.org/"
1977 BUG_REPORT_URL="https://bugs.centos.org/"
1979 CENTOS_MANTISBT_PROJECT="CentOS-8"
1980 CENTOS_MANTISBT_PROJECT_VERSION="8"
1981 REDHAT_SUPPORT_PRODUCT="centos"
1982 REDHAT_SUPPORT_PRODUCT_VERSION="8"
1985 # YumDnf on Fedora - Fedora not supported
1989 err_text
="does not build Fedora",
1990 os_release
=dedent("""
1992 VERSION="35 (Cloud Edition)"
1996 PLATFORM_ID="platform:f35"
1997 PRETTY_NAME="Fedora Linux 35 (Cloud Edition)"
1998 ANSI_COLOR="0;38;2;60;110;180"
1999 LOGO=fedora-logo-icon
2000 CPE_NAME="cpe:/o:fedoraproject:fedora:35"
2001 HOME_URL="https://fedoraproject.org/"
2002 DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f35/system-administrators-guide/"
2003 SUPPORT_URL="https://ask.fedoraproject.org/"
2004 BUG_REPORT_URL="https://bugzilla.redhat.com/"
2005 REDHAT_BUGZILLA_PRODUCT="Fedora"
2006 REDHAT_BUGZILLA_PRODUCT_VERSION=35
2007 REDHAT_SUPPORT_PRODUCT="Fedora"
2008 REDHAT_SUPPORT_PRODUCT_VERSION=35
2009 PRIVACY_POLICY_URL="https://fedoraproject.org/wiki/Legal:PrivacyPolicy"
2010 VARIANT="Cloud Edition"
2014 # YumDnf on Centos 7 - no pacific
2018 err_text
="does not support pacific",
2019 os_release
=dedent("""
2023 ID_LIKE="rhel fedora"
2025 PRETTY_NAME="CentOS Linux 7 (Core)"
2027 CPE_NAME="cpe:/o:centos:centos:7"
2028 HOME_URL="https://www.centos.org/"
2029 BUG_REPORT_URL="https://bugs.centos.org/"
2031 CENTOS_MANTISBT_PROJECT="CentOS-7"
2032 CENTOS_MANTISBT_PROJECT_VERSION="7"
2033 REDHAT_SUPPORT_PRODUCT="centos"
2034 REDHAT_SUPPORT_PRODUCT_VERSION="7"
2037 # YumDnf on Centos 7 - nothing after pacific
2041 err_text
="does not support pacific",
2042 os_release
=dedent("""
2046 ID_LIKE="rhel fedora"
2048 PRETTY_NAME="CentOS Linux 7 (Core)"
2050 CPE_NAME="cpe:/o:centos:centos:7"
2051 HOME_URL="https://www.centos.org/"
2052 BUG_REPORT_URL="https://bugs.centos.org/"
2054 CENTOS_MANTISBT_PROJECT="CentOS-7"
2055 CENTOS_MANTISBT_PROJECT_VERSION="7"
2056 REDHAT_SUPPORT_PRODUCT="centos"
2057 REDHAT_SUPPORT_PRODUCT_VERSION="7"
2060 # YumDnf on Centos 7 - nothing v16 or higher
2064 err_text
="does not support",
2065 os_release
=dedent("""
2069 ID_LIKE="rhel fedora"
2071 PRETTY_NAME="CentOS Linux 7 (Core)"
2073 CPE_NAME="cpe:/o:centos:centos:7"
2074 HOME_URL="https://www.centos.org/"
2075 BUG_REPORT_URL="https://bugs.centos.org/"
2077 CENTOS_MANTISBT_PROJECT="CentOS-7"
2078 CENTOS_MANTISBT_PROJECT_VERSION="7"
2079 REDHAT_SUPPORT_PRODUCT="centos"
2080 REDHAT_SUPPORT_PRODUCT_VERSION="7"
2083 @mock.patch('cephadm.find_executable', return_value
='foo')
2084 def test_distro_validation(self
, _find_executable
, values
, cephadm_fs
):
2085 os_release
= values
['os_release']
2086 release
= values
['release']
2087 version
= values
['version']
2088 err_text
= values
['err_text']
2090 cephadm_fs
.create_file('/etc/os-release', contents
=os_release
)
2091 ctx
= _cephadm
.CephadmContext()
2092 ctx
.repo_url
= 'http://localhost'
2093 pkg
= _cephadm
.create_packager(ctx
, stable
=release
, version
=version
)
2096 with pytest
.raises(_cephadm
.Error
, match
=err_text
):
2099 with mock
.patch('cephadm.urlopen', return_value
=None):
2102 @pytest.mark
.parametrize('values',
2109 os_release
=dedent("""
2111 VERSION="20.04 LTS (Focal Fossa)"
2114 PRETTY_NAME="Ubuntu 20.04 LTS"
2116 HOME_URL="https://www.ubuntu.com/"
2117 SUPPORT_URL="https://help.ubuntu.com/"
2118 BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2119 PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2120 VERSION_CODENAME=focal
2121 UBUNTU_CODENAME=focal
2124 # YumDnf on Centos8 - force failure
2128 err_text
="failed to fetch repository metadata",
2129 os_release
=dedent("""
2133 ID_LIKE="rhel fedora"
2135 PLATFORM_ID="platform:el8"
2136 PRETTY_NAME="CentOS Linux 8 (Core)"
2138 CPE_NAME="cpe:/o:centos:centos:8"
2139 HOME_URL="https://www.centos.org/"
2140 BUG_REPORT_URL="https://bugs.centos.org/"
2142 CENTOS_MANTISBT_PROJECT="CentOS-8"
2143 CENTOS_MANTISBT_PROJECT_VERSION="8"
2144 REDHAT_SUPPORT_PRODUCT="centos"
2145 REDHAT_SUPPORT_PRODUCT_VERSION="8"
2148 @mock.patch('cephadm.find_executable', return_value
='foo')
2149 @mock.patch('cephadm.logger')
2150 def test_http_validation(self
, _logger
, _find_executable
, values
, cephadm_fs
):
2151 from urllib
.error
import HTTPError
2153 os_release
= values
['os_release']
2154 release
= values
['release']
2155 version
= values
['version']
2156 err_text
= values
['err_text']
2158 cephadm_fs
.create_file('/etc/os-release', contents
=os_release
)
2159 ctx
= _cephadm
.CephadmContext()
2160 ctx
.repo_url
= 'http://localhost'
2161 pkg
= _cephadm
.create_packager(ctx
, stable
=release
, version
=version
)
2163 with mock
.patch('cephadm.urlopen') as _urlopen
:
2164 _urlopen
.side_effect
= HTTPError(ctx
.repo_url
, 404, "not found", None, fp
=None)
2166 with pytest
.raises(_cephadm
.Error
, match
=err_text
):
2174 @mock.patch('time.sleep')
2175 @mock.patch('cephadm.call', return_value
=('', '', 0))
2176 @mock.patch('cephadm.get_image_info_from_inspect', return_value
={})
2177 @mock.patch('cephadm.logger')
2178 def test_error(self
, _logger
, _get_image_info_from_inspect
, _call
, _sleep
):
2179 ctx
= _cephadm
.CephadmContext()
2180 ctx
.container_engine
= mock_podman()
2181 ctx
.insecure
= False
2183 _call
.return_value
= ('', '', 0)
2184 retval
= _cephadm
.command_pull(ctx
)
2187 err
= 'maximum retries reached'
2189 _call
.return_value
= ('', 'foobar', 1)
2190 with pytest
.raises(_cephadm
.Error
) as e
:
2191 _cephadm
.command_pull(ctx
)
2192 assert err
not in str(e
.value
)
2194 _call
.return_value
= ('', 'net/http: TLS handshake timeout', 1)
2195 with pytest
.raises(_cephadm
.Error
) as e
:
2196 _cephadm
.command_pull(ctx
)
2197 assert err
in str(e
.value
)
2199 @mock.patch('cephadm.get_image_info_from_inspect', return_value
={})
2200 @mock.patch('cephadm.infer_local_ceph_image', return_value
='last_local_ceph_image')
2201 def test_image(self
, _infer_local_ceph_image
, _get_image_info_from_inspect
):
2203 with
with_cephadm_ctx(cmd
) as ctx
:
2204 retval
= _cephadm
.command_pull(ctx
)
2206 assert ctx
.image
== _cephadm
.DEFAULT_IMAGE
2208 with mock
.patch
.dict(os
.environ
, {"CEPHADM_IMAGE": 'cephadm_image_environ'}):
2210 with
with_cephadm_ctx(cmd
) as ctx
:
2211 retval
= _cephadm
.command_pull(ctx
)
2213 assert ctx
.image
== 'cephadm_image_environ'
2215 cmd
= ['--image', 'cephadm_image_param', 'pull']
2216 with
with_cephadm_ctx(cmd
) as ctx
:
2217 retval
= _cephadm
.command_pull(ctx
)
2219 assert ctx
.image
== 'cephadm_image_param'
2222 class TestApplySpec
:
2224 def test_extract_host_info_from_applied_spec(self
, cephadm_fs
):
2228 addr: 192.168.122.44
2235 addr: 192.168.122.247
2246 rgw_frontend_ssl_certificate: |
2247 -----BEGIN PRIVATE KEY-----
2248 V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
2249 ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
2250 IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
2251 YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
2252 ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
2253 -----END PRIVATE KEY-----
2254 -----BEGIN CERTIFICATE-----
2255 V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
2256 ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
2257 IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
2258 YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
2259 ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
2260 -----END CERTIFICATE-----
2265 cephadm_fs
.create_file('spec.yml', contents
=yaml
)
2266 retdic
= [{'hostname': 'vm-00', 'addr': '192.168.122.44'},
2267 {'hostname': 'vm-01', 'addr': '192.168.122.247'},
2268 {'hostname': 'vm-02',}]
2270 with
open('spec.yml') as f
:
2271 dic
= _cephadm
._extract
_host
_info
_from
_applied
_spec
(f
)
2272 assert dic
== retdic
2274 @mock.patch('cephadm.call', return_value
=('', '', 0))
2275 @mock.patch('cephadm.logger')
2276 def test_distribute_ssh_keys(self
, _logger
, _call
):
2277 ctx
= _cephadm
.CephadmContext()
2278 ctx
.ssh_public_key
= None
2279 ctx
.ssh_user
= 'root'
2281 host_spec
= {'service_type': 'host', 'hostname': 'vm-02', 'addr': '192.168.122.165'}
2283 retval
= _cephadm
._distribute
_ssh
_keys
(ctx
, host_spec
, 'bootstrap_hostname')
2287 _call
.return_value
= ('', '', 1)
2289 retval
= _cephadm
._distribute
_ssh
_keys
(ctx
, host_spec
, 'bootstrap_hostname')
2294 class TestSNMPGateway
:
2296 'snmp_community': 'public',
2297 'destination': '192.168.1.10:162',
2298 'snmp_version': 'V2c',
2300 V3_no_priv_config
= {
2301 'destination': '192.168.1.10:162',
2302 'snmp_version': 'V3',
2303 'snmp_v3_auth_username': 'myuser',
2304 'snmp_v3_auth_password': 'mypassword',
2305 'snmp_v3_auth_protocol': 'SHA',
2306 'snmp_v3_engine_id': '8000C53F00000000',
2309 'destination': '192.168.1.10:162',
2310 'snmp_version': 'V3',
2311 'snmp_v3_auth_username': 'myuser',
2312 'snmp_v3_auth_password': 'mypassword',
2313 'snmp_v3_auth_protocol': 'SHA',
2314 'snmp_v3_priv_protocol': 'DES',
2315 'snmp_v3_priv_password': 'mysecret',
2316 'snmp_v3_engine_id': '8000C53F00000000',
2318 no_destination_config
= {
2319 'snmp_version': 'V3',
2320 'snmp_v3_auth_username': 'myuser',
2321 'snmp_v3_auth_password': 'mypassword',
2322 'snmp_v3_auth_protocol': 'SHA',
2323 'snmp_v3_priv_protocol': 'DES',
2324 'snmp_v3_priv_password': 'mysecret',
2325 'snmp_v3_engine_id': '8000C53F00000000',
2327 bad_version_config
= {
2328 'snmp_community': 'public',
2329 'destination': '192.168.1.10:162',
2330 'snmp_version': 'V1',
2333 def test_unit_run_V2c(self
, cephadm_fs
):
2334 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2335 with
with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks
={}) as ctx
:
2337 ctx
.config_json
= json
.dumps(self
.V2c_config
)
2339 ctx
.tcp_ports
= '9464'
2340 _cephadm
.get_parm
.return_value
= self
.V2c_config
2341 c
= _cephadm
.get_container(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2343 _cephadm
.make_data_dir(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2345 _cephadm
.create_daemon_dirs(ctx
, fsid
, 'snmp-gateway', 'daemon_id', 0, 0)
2346 with
open(f
'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f
:
2347 conf
= f
.read().rstrip()
2348 assert conf
== 'SNMP_NOTIFIER_COMMUNITY=public'
2350 _cephadm
.deploy_daemon_units(
2359 with
open(f
'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f
:
2360 run_cmd
= f
.readlines()[-1].rstrip()
2361 assert run_cmd
.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9464 --snmp.destination=192.168.1.10:162 --snmp.version=V2c --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl')
2363 def test_unit_run_V3_noPriv(self
, cephadm_fs
):
2364 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2365 with
with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks
={}) as ctx
:
2367 ctx
.config_json
= json
.dumps(self
.V3_no_priv_config
)
2369 ctx
.tcp_ports
= '9465'
2370 _cephadm
.get_parm
.return_value
= self
.V3_no_priv_config
2371 c
= _cephadm
.get_container(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2373 _cephadm
.make_data_dir(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2375 _cephadm
.create_daemon_dirs(ctx
, fsid
, 'snmp-gateway', 'daemon_id', 0, 0)
2376 with
open(f
'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f
:
2378 assert conf
== 'SNMP_NOTIFIER_AUTH_USERNAME=myuser\nSNMP_NOTIFIER_AUTH_PASSWORD=mypassword\n'
2380 _cephadm
.deploy_daemon_units(
2389 with
open(f
'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f
:
2390 run_cmd
= f
.readlines()[-1].rstrip()
2391 assert run_cmd
.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9465 --snmp.destination=192.168.1.10:162 --snmp.version=V3 --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl --snmp.authentication-enabled --snmp.authentication-protocol=SHA --snmp.security-engine-id=8000C53F00000000')
2393 def test_unit_run_V3_Priv(self
, cephadm_fs
):
2394 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2395 with
with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks
={}) as ctx
:
2397 ctx
.config_json
= json
.dumps(self
.V3_priv_config
)
2399 ctx
.tcp_ports
= '9464'
2400 _cephadm
.get_parm
.return_value
= self
.V3_priv_config
2401 c
= _cephadm
.get_container(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2403 _cephadm
.make_data_dir(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2405 _cephadm
.create_daemon_dirs(ctx
, fsid
, 'snmp-gateway', 'daemon_id', 0, 0)
2406 with
open(f
'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f
:
2408 assert conf
== 'SNMP_NOTIFIER_AUTH_USERNAME=myuser\nSNMP_NOTIFIER_AUTH_PASSWORD=mypassword\nSNMP_NOTIFIER_PRIV_PASSWORD=mysecret\n'
2410 _cephadm
.deploy_daemon_units(
2419 with
open(f
'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f
:
2420 run_cmd
= f
.readlines()[-1].rstrip()
2421 assert run_cmd
.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9464 --snmp.destination=192.168.1.10:162 --snmp.version=V3 --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl --snmp.authentication-enabled --snmp.authentication-protocol=SHA --snmp.security-engine-id=8000C53F00000000 --snmp.private-enabled --snmp.private-protocol=DES')
2423 def test_unit_run_no_dest(self
, cephadm_fs
):
2424 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2425 with
with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks
={}) as ctx
:
2427 ctx
.config_json
= json
.dumps(self
.no_destination_config
)
2429 ctx
.tcp_ports
= '9464'
2430 _cephadm
.get_parm
.return_value
= self
.no_destination_config
2432 with pytest
.raises(Exception) as e
:
2433 c
= _cephadm
.get_container(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2434 assert str(e
.value
) == "config is missing destination attribute(<ip>:<port>) of the target SNMP listener"
2436 def test_unit_run_bad_version(self
, cephadm_fs
):
2437 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2438 with
with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks
={}) as ctx
:
2440 ctx
.config_json
= json
.dumps(self
.bad_version_config
)
2442 ctx
.tcp_ports
= '9464'
2443 _cephadm
.get_parm
.return_value
= self
.bad_version_config
2445 with pytest
.raises(Exception) as e
:
2446 c
= _cephadm
.get_container(ctx
, fsid
, 'snmp-gateway', 'daemon_id')
2447 assert str(e
.value
) == 'not a valid snmp version: V1'
2449 class TestNetworkValidation
:
2451 def test_ipv4_subnet(self
):
2452 rc
, v
, msg
= _cephadm
.check_subnet('192.168.1.0/24')
2453 assert rc
== 0 and v
[0] == 4
2455 def test_ipv4_subnet_list(self
):
2456 rc
, v
, msg
= _cephadm
.check_subnet('192.168.1.0/24,10.90.90.0/24')
2457 assert rc
== 0 and not msg
2459 def test_ipv4_subnet_list_with_spaces(self
):
2460 rc
, v
, msg
= _cephadm
.check_subnet('192.168.1.0/24, 10.90.90.0/24 ')
2461 assert rc
== 0 and not msg
2463 def test_ipv4_subnet_badlist(self
):
2464 rc
, v
, msg
= _cephadm
.check_subnet('192.168.1.0/24,192.168.1.1')
2465 assert rc
== 1 and msg
2467 def test_ipv4_subnet_mixed(self
):
2468 rc
, v
, msg
= _cephadm
.check_subnet('192.168.100.0/24,fe80::/64')
2469 assert rc
== 0 and v
== [4,6]
2471 def test_ipv6_subnet(self
):
2472 rc
, v
, msg
= _cephadm
.check_subnet('fe80::/64')
2473 assert rc
== 0 and v
[0] == 6
2475 def test_subnet_mask_missing(self
):
2476 rc
, v
, msg
= _cephadm
.check_subnet('192.168.1.58')
2477 assert rc
== 1 and msg
2479 def test_subnet_mask_junk(self
):
2480 rc
, v
, msg
= _cephadm
.check_subnet('wah')
2481 assert rc
== 1 and msg
2483 def test_ip_in_subnet(self
):
2484 # valid ip and only one valid subnet
2485 rc
= _cephadm
.ip_in_subnets('192.168.100.1', '192.168.100.0/24')
2488 # valid ip and valid subnets list without spaces
2489 rc
= _cephadm
.ip_in_subnets('192.168.100.1', '192.168.100.0/24,10.90.90.0/24')
2492 # valid ip and valid subnets list with spaces
2493 rc
= _cephadm
.ip_in_subnets('10.90.90.2', '192.168.1.0/24, 192.168.100.0/24, 10.90.90.0/24')
2496 # valid ip that doesn't belong to any subnet
2497 rc
= _cephadm
.ip_in_subnets('192.168.100.2', '192.168.50.0/24, 10.90.90.0/24')
2500 # valid ip that doesn't belong to the subnet (only 14 hosts)
2501 rc
= _cephadm
.ip_in_subnets('192.168.100.20', '192.168.100.0/28')
2504 # valid ip and valid IPV6 network
2505 rc
= _cephadm
.ip_in_subnets('fe80::5054:ff:fef4:873a', 'fe80::/64')
2508 # valid wrapped ip and valid IPV6 network
2509 rc
= _cephadm
.ip_in_subnets('[fe80::5054:ff:fef4:873a]', 'fe80::/64')
2512 # valid ip and that doesn't belong to IPV6 network
2513 rc
= _cephadm
.ip_in_subnets('fe80::5054:ff:fef4:873a', '2001:db8:85a3::/64')
2516 # invalid IPv4 and valid subnets list
2517 with pytest
.raises(Exception):
2518 rc
= _cephadm
.ip_in_sublets('10.90.200.', '192.168.1.0/24, 192.168.100.0/24, 10.90.90.0/24')
2520 # invalid IPv6 and valid subnets list
2521 with pytest
.raises(Exception):
2522 rc
= _cephadm
.ip_in_sublets('fe80:2030:31:24', 'fe80::/64')
2524 @pytest.mark
.parametrize("conf", [
2526 public_network='1.1.1.0/24,2.2.2.0/24'
2527 cluster_network="3.3.3.0/24, 4.4.4.0/24"
2530 public_network=" 1.1.1.0/24,2.2.2.0/24 "
2531 cluster_network=3.3.3.0/24, 4.4.4.0/24
2534 public_network= 1.1.1.0/24, 2.2.2.0/24
2535 cluster_network='3.3.3.0/24,4.4.4.0/24'
2537 @mock.patch('cephadm.list_networks')
2538 @mock.patch('cephadm.logger')
2539 def test_get_networks_from_conf(self
, _logger
, _list_networks
, conf
, cephadm_fs
):
2540 cephadm_fs
.create_file('ceph.conf', contents
=conf
)
2541 _list_networks
.return_value
= {'1.1.1.0/24': {'eth0': ['1.1.1.1']},
2542 '2.2.2.0/24': {'eth1': ['2.2.2.2']},
2543 '3.3.3.0/24': {'eth2': ['3.3.3.3']},
2544 '4.4.4.0/24': {'eth3': ['4.4.4.4']}}
2545 ctx
= _cephadm
.CephadmContext()
2546 ctx
.config
= 'ceph.conf'
2547 ctx
.mon_ip
= '1.1.1.1'
2548 ctx
.cluster_network
= None
2549 # what the cephadm module does with the public network string is
2550 # [x.strip() for x in out.split(',')]
2551 # so we must make sure our output, through that alteration,
2552 # generates correctly formatted networks
2553 def _str_to_networks(s
):
2554 return [x
.strip() for x
in s
.split(',')]
2555 public_network
= _cephadm
.get_public_net_from_cfg(ctx
)
2556 assert _str_to_networks(public_network
) == ['1.1.1.0/24', '2.2.2.0/24']
2557 cluster_network
, ipv6
= _cephadm
.prepare_cluster_network(ctx
)
2559 assert _str_to_networks(cluster_network
) == ['3.3.3.0/24', '4.4.4.0/24']
2562 @mock.patch('cephadm.sysctl_get')
2563 def test_filter_sysctl_settings(self
, _sysctl_get
):
2564 ctx
= _cephadm
.CephadmContext()
2566 # comment-only lines should be ignored
2568 # As should whitespace-only lines",
2571 # inline comments are stripped when querying
2572 "something = value # inline comment",
2573 "fs.aio-max-nr = 1048576",
2574 "kernel.pid_max = 4194304",
2575 "vm.lowmem_reserve_ratio = 256\t256\t32\t0\t0",
2576 " vm.max_map_count = 65530 ",
2577 " vm.max_map_count = 65530 ",
2579 _sysctl_get
.side_effect
= [
2583 "256\t256\t32\t0\t0",
2587 result
= _cephadm
.filter_sysctl_settings(ctx
, input)
2588 assert len(_sysctl_get
.call_args_list
) == 6
2589 assert _sysctl_get
.call_args_list
[0].args
[1] == "something"
2590 assert _sysctl_get
.call_args_list
[1].args
[1] == "fs.aio-max-nr"
2591 assert _sysctl_get
.call_args_list
[2].args
[1] == "kernel.pid_max"
2592 assert _sysctl_get
.call_args_list
[3].args
[1] == "vm.lowmem_reserve_ratio"
2593 assert _sysctl_get
.call_args_list
[4].args
[1] == "vm.max_map_count"
2594 assert _sysctl_get
.call_args_list
[5].args
[1] == "vm.max_map_count"
2596 "fs.aio-max-nr = 1048576",
2597 " vm.max_map_count = 65530 ",
2601 single_es_node_conf
= {
2602 'elasticsearch_nodes': 'http://192.168.0.1:9200'}
2603 multiple_es_nodes_conf
= {
2604 'elasticsearch_nodes': 'http://192.168.0.1:9200,http://192.168.0.2:9300'}
2606 'collector_nodes': 'test:14250'}
2608 def test_single_es(self
, cephadm_fs
):
2609 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2610 with
with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-collector:1.29'], list_networks
={}) as ctx
:
2612 ctx
.config_json
= json
.dumps(self
.single_es_node_conf
)
2614 c
= _cephadm
.get_container(ctx
, fsid
, 'jaeger-collector', 'daemon_id')
2615 _cephadm
.create_daemon_dirs(ctx
, fsid
, 'jaeger-collector', 'daemon_id', 0, 0)
2616 _cephadm
.deploy_daemon_units(
2625 with
open(f
'/var/lib/ceph/{fsid}/jaeger-collector.daemon_id/unit.run', 'r') as f
:
2626 run_cmd
= f
.readlines()[-1].rstrip()
2627 assert run_cmd
.endswith('SPAN_STORAGE_TYPE=elasticsearch -e ES_SERVER_URLS=http://192.168.0.1:9200 quay.io/jaegertracing/jaeger-collector:1.29')
2629 def test_multiple_es(self
, cephadm_fs
):
2630 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2631 with
with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-collector:1.29'], list_networks
={}) as ctx
:
2633 ctx
.config_json
= json
.dumps(self
.multiple_es_nodes_conf
)
2635 c
= _cephadm
.get_container(ctx
, fsid
, 'jaeger-collector', 'daemon_id')
2636 _cephadm
.create_daemon_dirs(ctx
, fsid
, 'jaeger-collector', 'daemon_id', 0, 0)
2637 _cephadm
.deploy_daemon_units(
2646 with
open(f
'/var/lib/ceph/{fsid}/jaeger-collector.daemon_id/unit.run', 'r') as f
:
2647 run_cmd
= f
.readlines()[-1].rstrip()
2648 assert run_cmd
.endswith('SPAN_STORAGE_TYPE=elasticsearch -e ES_SERVER_URLS=http://192.168.0.1:9200,http://192.168.0.2:9300 quay.io/jaegertracing/jaeger-collector:1.29')
2650 def test_jaeger_agent(self
, cephadm_fs
):
2651 fsid
= 'ca734440-3dc6-11ec-9b98-5254002537a6'
2652 with
with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-agent:1.29'], list_networks
={}) as ctx
:
2654 ctx
.config_json
= json
.dumps(self
.agent_conf
)
2656 c
= _cephadm
.get_container(ctx
, fsid
, 'jaeger-agent', 'daemon_id')
2657 _cephadm
.create_daemon_dirs(ctx
, fsid
, 'jaeger-agent', 'daemon_id', 0, 0)
2658 _cephadm
.deploy_daemon_units(
2667 with
open(f
'/var/lib/ceph/{fsid}/jaeger-agent.daemon_id/unit.run', 'r') as f
:
2668 run_cmd
= f
.readlines()[-1].rstrip()
2669 assert run_cmd
.endswith('quay.io/jaegertracing/jaeger-agent:1.29 --reporter.grpc.host-port=test:14250 --processor.jaeger-compact.server-host-port=6799')
2671 class TestRescan(fake_filesystem_unittest
.TestCase
):
2674 self
.setUpPyfakefs()
2675 if not fake_filesystem
.is_root():
2676 fake_filesystem
.set_uid(0)
2678 self
.fs
.create_dir('/sys/class')
2679 self
.ctx
= _cephadm
.CephadmContext()
2680 self
.ctx
.func
= _cephadm
.command_rescan_disks
2682 @mock.patch('cephadm.logger')
2683 def test_no_hbas(self
, _logger
):
2684 out
= _cephadm
.command_rescan_disks(self
.ctx
)
2685 assert out
== 'Ok. No compatible HBAs found'
2687 @mock.patch('cephadm.logger')
2688 def test_success(self
, _logger
):
2689 self
.fs
.create_file('/sys/class/scsi_host/host0/scan')
2690 self
.fs
.create_file('/sys/class/scsi_host/host1/scan')
2691 out
= _cephadm
.command_rescan_disks(self
.ctx
)
2692 assert out
.startswith('Ok. 2 adapters detected: 2 rescanned, 0 skipped, 0 failed')
2694 @mock.patch('cephadm.logger')
2695 def test_skip_usb_adapter(self
, _logger
):
2696 self
.fs
.create_file('/sys/class/scsi_host/host0/scan')
2697 self
.fs
.create_file('/sys/class/scsi_host/host1/scan')
2698 self
.fs
.create_file('/sys/class/scsi_host/host1/proc_name', contents
='usb-storage')
2699 out
= _cephadm
.command_rescan_disks(self
.ctx
)
2700 assert out
.startswith('Ok. 2 adapters detected: 1 rescanned, 1 skipped, 0 failed')
2702 @mock.patch('cephadm.logger')
2703 def test_skip_unknown_adapter(self
, _logger
):
2704 self
.fs
.create_file('/sys/class/scsi_host/host0/scan')
2705 self
.fs
.create_file('/sys/class/scsi_host/host1/scan')
2706 self
.fs
.create_file('/sys/class/scsi_host/host1/proc_name', contents
='unknown')
2707 out
= _cephadm
.command_rescan_disks(self
.ctx
)
2708 assert out
.startswith('Ok. 2 adapters detected: 1 rescanned, 1 skipped, 0 failed')