]>
git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
bec30e6d7cb4f6ec92333cbcf424960ef17bcf33
envlist = centos8-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
ANSIBLE_STDOUT_CALLBACK = debug
VAGRANT_CWD = {changedir}
DEBIAN_FRONTEND=noninteractive
centos8-filestore-create: {toxinidir}/centos8/filestore/create
centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
centos8-filestore-dmcrypt: {toxinidir}/centos8/filestore/dmcrypt
centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
# TODO: these are placeholders for now, eventually we want to
# test the prepare/activate workflow of ceph-volume as well
centos8-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
# create logical volumes to test with on the vms
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml
# ad-hoc/local test setup for lvm
ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
# use ceph-ansible to deploy a ceph cluster on the vms
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
# test cluster state using testinfra
py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# reboot all vms - attempt
bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
# after a reboot, osds may take about 20 seconds to come back up
# retest to ensure cluster came back up correctly after rebooting
py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# destroy an OSD, zap its device and recreate it using its ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
# retest to ensure cluster came back up correctly
py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}