# tox.ini — ceph-volume "batch" functional test suite
# Upstream path: ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
# Matrix of test environments: {distro}-{objectstore}-{scenario}.
# Example env name: centos7-bluestore-single_type_dmcrypt
[tox]
envlist = {centos7,xenial}-{bluestore,filestore}-{single_type,single_type_dmcrypt}
skipsdist = True

# Shared environment definition: boots vagrant VMs, deploys a ceph cluster
# with ceph-ansible, validates it with testinfra, reboots, re-validates,
# then destroys/recreates one OSD and validates once more.
[testenv]
whitelist_externals =
    vagrant
    bash
    git
    cp
passenv=*
setenv=
    ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
    ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
    ANSIBLE_STDOUT_CALLBACK = debug
    ANSIBLE_RETRY_FILES_ENABLED = False
    ANSIBLE_SSH_RETRIES = 5
    VAGRANT_CWD = {changedir}
    CEPH_VOLUME_DEBUG = 1
deps=
    ansible~=2.6,<2.7
    testinfra
    pytest-xdist
    notario>=0.0.13
changedir=
    centos7-filestore-single_type: {toxinidir}/centos7/filestore/single-type
    centos7-filestore-single_type_dmcrypt: {toxinidir}/centos7/filestore/single-type-dmcrypt
    centos7-bluestore-single_type: {toxinidir}/centos7/bluestore/single-type
    centos7-bluestore-single_type_dmcrypt: {toxinidir}/centos7/bluestore/single-type-dmcrypt
    xenial-filestore-single_type: {toxinidir}/xenial/filestore/single-type
    xenial-filestore-single_type_dmcrypt: {toxinidir}/xenial/filestore/single-type-dmcrypt
    xenial-bluestore-single_type: {toxinidir}/xenial/bluestore/single-type
    xenial-bluestore-single_type_dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt
commands=
    git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible

    bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
    bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

    cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible

    # use ceph-ansible to deploy a ceph cluster on the vms
    ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"

    # prepare nodes for testing with testinfra
    ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

    # test cluster state using ceph-ansible tests
    testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

    # reboot all vms - attempt
    bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

    # retest to ensure cluster came back up correctly after rebooting
    testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

    # destroy an OSD, zap its device and recreate it using its ID
    ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

    # retest to ensure cluster came back up correctly
    testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

    vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}