# ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
[tox]
envlist = centos8-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
skipsdist = True

[testenv]
deps = mock
whitelist_externals =
    vagrant
    bash
    git
    cp
    sleep
passenv=*
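# environment used by ansible, vagrant, and ceph-volume in every scenario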
setenv=
    ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
    ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
    ANSIBLE_STDOUT_CALLBACK = debug
    ANSIBLE_RETRY_FILES_ENABLED = False
    ANSIBLE_SSH_RETRIES = 5
    VAGRANT_CWD = {changedir}
    CEPH_VOLUME_DEBUG = 1
    DEBIAN_FRONTEND=noninteractive
changedir=
    # plain/unencrypted
    centos8-filestore-create: {toxinidir}/centos8/filestore/create
    centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
    # dmcrypt
    centos8-filestore-dmcrypt: {toxinidir}/centos8/filestore/dmcrypt
    centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
    # TODO: these are placeholders for now, eventually we want to
    # test the prepare/activate workflow of ceph-volume as well
    centos8-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
    centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
commands=
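    # clone ceph-ansible at the requested branch and install its test dependencies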
    git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
    pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
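
    # bring up the vagrant vms and write the ssh config that ansible and testinfra use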
    bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
    bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

    # create logical volumes to test with on the vms
    ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml

    # ad-hoc/local test setup for lvm
    ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
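
    # copy the local deploy playbook into the ceph-ansible clone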
    cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible

    # use ceph-ansible to deploy a ceph cluster on the vms
    ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"

    # prepare nodes for testing with testinfra
    ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

    # test cluster state using testinfra
    py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

    # attempt to reboot all vms
    bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

    # after a reboot, osds may take about 20 seconds to come back up
    sleep 30

    # retest to ensure the cluster came back up correctly after rebooting
    py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

    # destroy an OSD, zap its device, and recreate it using its ID
    ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

    # retest to ensure the cluster came back up correctly
    py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
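
    # tear down the vagrant vms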
    vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}