]> git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
0c30fa7f5e6cb9b84a7240276db1ae89239a1a97
[ceph.git] / ceph / src / ceph-volume / ceph_volume / tests / functional / batch / tox.ini
1 [tox]
2 envlist = centos8-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos8-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
3 skipsdist = True
4
5 [testenv]
6 deps = mock
7 allowlist_externals =
8 vagrant
9 bash
10 git
11 cp
12 sleep
13 passenv=*
14 setenv=
15 ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
16 ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
17 ANSIBLE_STDOUT_CALLBACK = debug
18 VAGRANT_CWD = {changedir}
19 CEPH_VOLUME_DEBUG = 1
20 DEBIAN_FRONTEND=noninteractive
21 changedir=
22 centos8-filestore-single_type: {toxinidir}/centos8/filestore/single-type
23 centos8-filestore-single_type_dmcrypt: {toxinidir}/centos8/filestore/single-type-dmcrypt
24 centos8-filestore-mixed_type: {toxinidir}/centos8/filestore/mixed-type
25 centos8-filestore-mixed_type_dmcrypt: {toxinidir}/centos8/filestore/mixed-type-dmcrypt
26 centos8-filestore-mixed_type_explicit: {toxinidir}/centos8/filestore/mixed-type-explicit
27 centos8-filestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/filestore/mixed-type-dmcrypt-explicit
28 centos8-bluestore-single_type: {toxinidir}/centos8/bluestore/single-type
29 centos8-bluestore-single_type_dmcrypt: {toxinidir}/centos8/bluestore/single-type-dmcrypt
30 centos8-bluestore-mixed_type: {toxinidir}/centos8/bluestore/mixed-type
31 centos8-bluestore-mixed_type_dmcrypt: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt
32 centos8-bluestore-mixed_type_explicit: {toxinidir}/centos8/bluestore/mixed-type-explicit
33 centos8-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt-explicit
34 commands=
35 git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
36 python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
37 ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
38
39 # bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:""} {posargs:--provider=virtualbox}
40 bash {toxinidir}/../scripts/vagrant_up.sh {posargs:--provider=virtualbox}
41 bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
42
43 cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
44
45 ansible -vv -i {changedir}/hosts all -b -m package -a 'name=rpm state=latest'
46
47 # individual scenario setup
48 ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
49
50 # use ceph-ansible to deploy a ceph cluster on the vms
51 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
52
53 # prepare nodes for testing with testinfra
54 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
55
56 # test cluster state using testinfra
57 py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
58
59 # attempt to reboot all VMs
60 bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
61
62 # after a reboot, osds may take about 20 seconds to come back up
63 sleep 30
64
65 # retest to ensure cluster came back up correctly after rebooting
66 py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
67
68 # destroy an OSD, zap its device and recreate it using its ID
69 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
70
71 # retest to ensure cluster came back up correctly
72 py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
73
74 # test zap OSDs by ID
75 ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
76
77 vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}