; tox.ini — ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
; Functional tests for the "ceph-volume simple" subcommand (vagrant + ceph-ansible).
; (source: git.proxmox.com mirror of ceph.git, tag "bump version to 12.2.5-pve1")
1 [tox]
2 envlist = {centos7,xenial}-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
3 skipsdist = True
4
5 [testenv]
6 whitelist_externals =
7 vagrant
8 bash
9 git
10 sleep
11 passenv=*
12 setenv=
13 ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
14 ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
15 ANSIBLE_STDOUT_CALLBACK = debug
16 ANSIBLE_RETRY_FILES_ENABLED = False
17 ANSIBLE_SSH_RETRIES = 5
18 VAGRANT_CWD = {changedir}
19 CEPH_VOLUME_DEBUG = 1
20 deps=
21 ansible==2.4.1
22 testinfra==1.7.1
23 pytest-xdist
24 changedir=
25 centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
26 centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
27 xenial-filestore-activate: {toxinidir}/xenial/filestore/activate
28 xenial-bluestore-activate: {toxinidir}/xenial/bluestore/activate
29 xenial-bluestore-dmcrypt_plain: {toxinidir}/xenial/bluestore/dmcrypt-plain
30 xenial-bluestore-dmcrypt_luks: {toxinidir}/xenial/bluestore/dmcrypt-luks
31 xenial-filestore-dmcrypt_plain: {toxinidir}/xenial/filestore/dmcrypt-plain
32 xenial-filestore-dmcrypt_luks: {toxinidir}/xenial/filestore/dmcrypt-luks
33 centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
34 centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
35 centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
36 centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks
37 commands=
38 git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
39
40 vagrant up --no-provision {posargs:--provider=virtualbox}
41 bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
42
43 # use ceph-ansible to deploy a ceph cluster on the vms
44 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
45
46 # prepare nodes for testing with testinfra
47 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
48
49 # test cluster state using ceph-ansible tests
50 testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
51
52 # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
53 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
54
55 # reboot all vms
56 bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
57
58 # wait 2 minutes for services to be ready
59 sleep 120
60
61 # retest to ensure cluster came back up correctly after rebooting
62 testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
63
64 vagrant destroy --force