]>
git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
2 envlist = {centos7,xenial}-{filestore,bluestore}-{activate}
12 ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
13 ANSIBLE_STDOUT_CALLBACK = debug
14 ANSIBLE_RETRY_FILES_ENABLED = False
15 VAGRANT_CWD = {changedir}
22   centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
23   centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
24   xenial-filestore-activate: {toxinidir}/xenial/filestore/activate
25   xenial-bluestore-activate: {toxinidir}/xenial/bluestore/activate
27   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
29 vagrant up --no-provision {posargs:--provider=virtualbox}
30   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
32 # use ceph-ansible to deploy a ceph cluster on the vms
33 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
35 # prepare nodes for testing with testinfra
36   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
38 # test cluster state using ceph-ansible tests
39 testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
41 # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
42   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
45   vagrant reload --no-provision
47 # retest to ensure cluster came back up correctly after rebooting
48 testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
50   vagrant destroy --force