vagrant
bash
git
+ cp
+ sleep
passenv=*
setenv=
ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
ANSIBLE_SSH_RETRIES = 5
VAGRANT_CWD = {changedir}
CEPH_VOLUME_DEBUG = 1
-deps=
- ansible==2.4.1
- testinfra==1.7.1
- pytest-xdist
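+ # keep apt from prompting during package installs on the ubuntu vms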
+ DEBIAN_FRONTEND=noninteractive
changedir=
# plain/unencrypted
centos7-filestore-create: {toxinidir}/centos7/filestore/create
centos7-bluestore-prepare_activate: {toxinidir}/centos7/bluestore/prepare_activate
commands=
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
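+ # install the test dependencies ceph-ansible pins for itself,
+ # replacing the hard-coded deps list removed above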
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
- vagrant up --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
# create logical volumes to test with on the vms
# ad-hoc/local test setup for lvm
ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
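+ # copy the local deploy playbook into the ceph-ansible checkout; it is invoked from there below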
+ cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
# use ceph-ansible to deploy a ceph cluster on the vms
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
- # test cluster state using ceph-ansible tests
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ # test cluster state using testinfra
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# reboot all vms
bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+ # after a reboot, OSDs may take about 20 seconds to come back up
+ sleep 30
+
# retest to ensure cluster came back up correctly after rebooting
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# destroy an OSD, zap its device, and recreate it using its ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
# retest to ensure cluster came back up correctly
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
- vagrant destroy --force
+ vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
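
The vagrant_up.sh and vagrant_reload.sh wrappers invoked above are referenced by this diff but not included in it. As a rough illustration, here is a minimal sketch of what vagrant_up.sh might look like, assuming its only job is to retry a flaky vagrant up and pass all flags through unchanged; the retry count and the destroy-before-retry cleanup are assumptions, not the real script:

#!/bin/bash
# Hypothetical sketch of scripts/vagrant_up.sh; the real script may differ.
# Retries vagrant up so a transient provider failure does not fail the
# whole tox run. Flags such as --no-provision and --provider=virtualbox
# arrive via "$@" exactly as tox passes them.

retries=0
max_retries=3    # assumed limit, not taken from the diff

until vagrant up "$@"; do
    retries=$((retries + 1))
    if [ "$retries" -ge "$max_retries" ]; then
        echo "vagrant up failed after ${retries} attempts" >&2
        exit 1
    fi
    # tear down any half-created vms before retrying (assumption)
    vagrant destroy --force || true
    sleep 10
done

vagrant_reload.sh would plausibly be the same loop around vagrant reload, which is what lets the reboot step above tolerate a slow or flaky restart.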