# tox.ini — functional test matrix for the ceph-volume "simple" scenarios
# (ceph.git: ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini,
#  as of the 12.2.8 source update)
[tox]
envlist = {centos7,xenial}-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
skipsdist = True

[testenv]
whitelist_externals =
    vagrant
    bash
    git
    sleep
    cp
passenv=*
setenv=
    ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
    ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
    ANSIBLE_STDOUT_CALLBACK = debug
    ANSIBLE_RETRY_FILES_ENABLED = False
    ANSIBLE_SSH_RETRIES = 5
    VAGRANT_CWD = {changedir}
    CEPH_VOLUME_DEBUG = 1
deps=
    ansible~=2.6,<2.7
    testinfra
    pytest-xdist
    notario>=0.0.13
changedir=
    centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
    centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
    xenial-filestore-activate: {toxinidir}/xenial/filestore/activate
    xenial-bluestore-activate: {toxinidir}/xenial/bluestore/activate
    xenial-bluestore-dmcrypt_plain: {toxinidir}/xenial/bluestore/dmcrypt-plain
    xenial-bluestore-dmcrypt_luks: {toxinidir}/xenial/bluestore/dmcrypt-luks
    xenial-filestore-dmcrypt_plain: {toxinidir}/xenial/filestore/dmcrypt-plain
    xenial-filestore-dmcrypt_luks: {toxinidir}/xenial/filestore/dmcrypt-luks
    centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
    centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
    centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
    centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks
commands=
    git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
    # XXX Ideally we should be able to consume the requirements for ceph-ansible directly,
    # but the master branch doesn't pin dependencies so we can't guarantee it will work correctly
    #pip install -r {envdir}/tmp/ceph-ansible/requirements.txt

    bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
    bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

    cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible

    # use ceph-ansible to deploy a ceph cluster on the vms
    ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"

    # prepare nodes for testing with testinfra
    ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

    # test cluster state using ceph-ansible tests
    testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

    # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
    ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

    # reboot all vms
    bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

    # wait 2 minutes for services to be ready
    sleep 120

    # retest to ensure cluster came back up correctly after rebooting
    testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

    vagrant destroy --force