import ceph 15.2.14

diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index 2b63875bf438d0969dd3c90015a065db617dbcb2..bec30e6d7cb4f6ec92333cbcf424960ef17bcf33 100644
--- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -12,11 +12,9 @@ whitelist_externals =
     sleep
 passenv=*
 setenv=
-  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
-  ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
+  ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
   ANSIBLE_STDOUT_CALLBACK = debug
-  ANSIBLE_RETRY_FILES_ENABLED = False
-  ANSIBLE_SSH_RETRIES = 5
   VAGRANT_CWD = {changedir}
   CEPH_VOLUME_DEBUG = 1
   DEBIAN_FRONTEND=noninteractive
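
Note on this hunk: the two deleted retry settings are not simply dropped. ANSIBLE_CONFIG now points at ceph-ansible's own ansible.cfg, which is expected to carry the equivalents. A minimal sketch of the entries that file would need (the actual contents of ceph-ansible's ansible.cfg are an assumption here, not shown in this diff):

    [defaults]
    ; replaces ANSIBLE_RETRY_FILES_ENABLED=False: no *.retry files in the test dir
    retry_files_enabled = False

    [ssh_connection]
    ; replaces ANSIBLE_SSH_RETRIES=5: retry flaky SSH connections to the vagrant VMs
    retries = 5
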
@@ -53,7 +51,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
   # test cluster state using testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
  # attempt to reboot all vms
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
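
Note on this hunk: the new --reruns / --reruns-delay flags are provided by the pytest-rerunfailures plugin, so the tox environment must install it. The deps list sits outside this hunk; a hedged sketch of what it would need to contain (names only, versions assumed unpinned):

    [testenv]
    deps =
      pytest-xdist          ; provides the -n 4 parallel workers
      pytest-rerunfailures  ; provides --reruns and --reruns-delay
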
@@ -62,12 +60,12 @@ commands=
   sleep 30
 
   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
  # destroy an OSD, zap its device and recreate it using its ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # retest to ensure cluster came back up correctly
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
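
Note on the ANSIBLE_SSH_ARGS change: the extra -o options enable SSH connection multiplexing, so the repeated ansible-playbook and py.test passes above reuse one TCP connection per VM instead of renegotiating for every task and rerun. For illustration only (not part of this change), the same effect expressed as a plain ssh_config stanza:

    Host *
        ControlMaster auto                   # share one master connection per host
        ControlPersist 600s                  # keep it open 10 minutes after last use
        PreferredAuthentications publickey   # never fall back to password prompts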