# tox.ini — drives the ceph-volume "simple" functional test matrix.
# Read by tox (configparser syntax). Each factor-conditional environment
# brings up Vagrant VMs, deploys a Ceph cluster via ceph-ansible, has
# `ceph-volume simple` take over the ceph-disk OSDs, and validates the
# cluster state with testinfra before and after a reboot.

[tox]
# matrix: objectstore x activation/dmcrypt scenario, all on centos7
envlist = centos7-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
skipsdist = True

[testenv]
deps = mock
# external commands tox is allowed to invoke from `commands`
allowlist_externals =
  vagrant
  bash
  git
  sleep
  cp
# pass the full caller environment through to the test commands
passenv=*
setenv=
  ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
  ANSIBLE_STDOUT_CALLBACK = debug
  VAGRANT_CWD = {changedir}
  CEPH_VOLUME_DEBUG = 1
  DEBIAN_FRONTEND=noninteractive
# each factor combination runs from its own scenario directory
changedir=
  centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
  centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
  centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
  centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
  centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
  centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks
commands=
  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
  pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt

  # bring up the vagrant VMs and capture their ssh configuration
  bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

  cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible

  # use ceph-ansible to deploy a ceph cluster on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"

  # prepare nodes for testing with testinfra
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

  # test cluster state testinfra
  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

  # reboot all vms
  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

  # wait 2 minutes for services to be ready
  sleep 120

  # retest to ensure cluster came back up correctly after rebooting
  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}