[tox]
envlist = {centos7,xenial}-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos7-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
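# the generative envlist above expands to one environment per distro/objectstore/scenario
# combination, e.g. centos7-bluestore-single_type or xenial-filestore-single_type_dmcrypt;
# mixed-type scenarios are only generated for centos7. A single scenario can be run
# locally with, for example: tox -e centos7-bluestore-single_type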
skipsdist = True

[testenv]
deps = mock
whitelist_externals =
    vagrant
    bash
    git
    cp
passenv=*
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
  ANSIBLE_STDOUT_CALLBACK = debug
  ANSIBLE_RETRY_FILES_ENABLED = False
  ANSIBLE_SSH_RETRIES = 5
  VAGRANT_CWD = {changedir}
  CEPH_VOLUME_DEBUG = 1
  DEBIAN_FRONTEND=noninteractive
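# each generated environment runs from its scenario directory below, which holds
# that scenario's Vagrantfile, hosts inventory and playbooks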
changedir=
  centos7-filestore-single_type: {toxinidir}/centos7/filestore/single-type
  centos7-filestore-single_type_dmcrypt: {toxinidir}/centos7/filestore/single-type-dmcrypt
  centos7-filestore-mixed_type: {toxinidir}/centos7/filestore/mixed-type
  centos7-filestore-mixed_type_dmcrypt: {toxinidir}/centos7/filestore/mixed-type-dmcrypt
  centos7-filestore-mixed_type_explicit: {toxinidir}/centos7/filestore/mixed-type-explicit
  centos7-filestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos7/filestore/mixed-type-dmcrypt-explicit
  centos7-bluestore-single_type: {toxinidir}/centos7/bluestore/single-type
  centos7-bluestore-single_type_dmcrypt: {toxinidir}/centos7/bluestore/single-type-dmcrypt
  centos7-bluestore-mixed_type: {toxinidir}/centos7/bluestore/mixed-type
  centos7-bluestore-mixed_type_dmcrypt: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt
  centos7-bluestore-mixed_type_explicit: {toxinidir}/centos7/bluestore/mixed-type-explicit
  centos7-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt-explicit
  xenial-filestore-single_type: {toxinidir}/xenial/filestore/single-type
  xenial-filestore-single_type_dmcrypt: {toxinidir}/xenial/filestore/single-type-dmcrypt
  xenial-bluestore-single_type: {toxinidir}/xenial/bluestore/single-type
  xenial-bluestore-single_type_dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt
commands=
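  # clone ceph-ansible at the branch selected by CEPH_ANSIBLE_BRANCH (default: master)
  # and install its test requirements into this tox environment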
  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
  python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
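
  # bring up the scenario vms (unprovisioned by default; override with VAGRANT_UP_FLAGS)
  # and write the ssh config that ansible uses to reach them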
  bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
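
  # make the shared deploy playbook available inside the ceph-ansible checkout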
  cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible

  # individual scenario setup
  ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml

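  # CEPH_DEV_BRANCH and CEPH_DEV_SHA1 select the ceph build under test
  # (defaults: master / latest); set them in the environment to test a different build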
  # use ceph-ansible to deploy a ceph cluster on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"

  # prepare nodes for testing with testinfra
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

  # test cluster state using testinfra
  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # reboot all vms - attempt
  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

  # after a reboot, osds may take about 20 seconds to come back up
  sleep 30

  # retest to ensure cluster came back up correctly after rebooting
  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # destroy an OSD, zap its device and recreate it using its ID
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

  # retest to ensure cluster came back up correctly
  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # test zap OSDs by ID
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml

  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}