import 14.2.4 nautilus point release
diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index 429b467f463c2138ab6a26f93b9c4e8faee4280d..cbdc7f263887356d75fca6615eda731453f90dc8 100644
--- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -7,6 +7,8 @@ whitelist_externals =
     vagrant
     bash
     git
+    cp
+    sleep
 passenv=*
 setenv=
   ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
@@ -16,10 +18,7 @@ setenv=
   ANSIBLE_SSH_RETRIES = 5
   VAGRANT_CWD = {changedir}
   CEPH_VOLUME_DEBUG = 1
-deps=
-  ansible==2.4.1
-  testinfra==1.7.1
-  pytest-xdist
+  DEBIAN_FRONTEND=noninteractive
 changedir=
   # plain/unencrypted
   centos7-filestore-create: {toxinidir}/centos7/filestore/create
@@ -39,8 +38,9 @@ changedir=
   centos7-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+  pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
 
-  vagrant up --no-provision {posargs:--provider=virtualbox}
+  bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
 
   # create logical volumes to test with on the vms
@@ -49,25 +49,30 @@ commands=
   # ad-hoc/local test setup for lvm
   ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
 
+  cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
   # use ceph-ansible to deploy a ceph cluster on the vms
-  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
 
   # prepare nodes for testing with testinfra
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
-  # test cluster state using ceph-ansible tests
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  # test cluster state using testinfra
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
 
+  # after a reboot, osds may take about 20 seconds to come back up
+  sleep 30
+
   # retest to ensure cluster came back up correctly after rebooting
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # destroy an OSD, zap its device and recreate it using its ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # retest to ensure cluster came back up correctly
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
-  vagrant destroy --force
+  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
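
For context, a rough example of how one of these environments might be invoked locally. The branch, flag, and provider values below are illustrative only, but the environment variables are the ones this tox.ini reads (CEPH_ANSIBLE_BRANCH, CEPH_DEV_BRANCH, CEPH_DEV_SHA1, VAGRANT_UP_FLAGS, VAGRANT_DESTROY_FLAGS), and centos7-filestore-create is one of the changedir entries shown above; the trailing posargs after "--" become the vagrant provider argument:

  # illustrative invocation, not taken from CI: run the centos7/filestore
  # "create" scenario, passing the vagrant provider through as posargs
  CEPH_ANSIBLE_BRANCH=stable-4.0 \
  CEPH_DEV_BRANCH=nautilus \
  CEPH_DEV_SHA1=latest \
  VAGRANT_UP_FLAGS="--no-provision" \
  VAGRANT_DESTROY_FLAGS="--force" \
  tox -c ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini \
      -e centos7-filestore-create -- --provider=libvirt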
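
The diff also swaps the direct "vagrant up" call for the shared {toxinidir}/../scripts/vagrant_up.sh wrapper, whose contents are not part of this change. A minimal sketch of such a wrapper, assuming it only forwards the flags tox passes in and retries on transient provider failures (the retry count and sleep interval are assumptions, not the actual script's behaviour):

  #!/bin/bash
  # hypothetical sketch of a vagrant_up.sh-style wrapper: forward all flags
  # (e.g. --no-provision --provider=virtualbox) and retry transient failures
  retries=0
  until vagrant up "$@"; do
      retries=$((retries + 1))
      if [ "$retries" -ge 3 ]; then
          echo "vagrant up failed after $retries attempts" >&2
          exit 1
      fi
      sleep 60
  done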