]>
git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
2 envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
12 ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
13 ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
14 ANSIBLE_STDOUT_CALLBACK = debug
15 ANSIBLE_RETRY_FILES_ENABLED = False
16 ANSIBLE_SSH_RETRIES = 5
17 VAGRANT_CWD = {changedir}
26 centos7-filestore-create: {toxinidir}/centos7/filestore/create
27 centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
28 xenial-filestore-create: {toxinidir}/xenial/filestore/create
29 xenial-bluestore-create: {toxinidir}/xenial/bluestore/create
31 centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
32 centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
33 xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt
34 xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt
35 # TODO: these are placeholders for now, eventually we want to
36 # test the prepare/activate workflow of ceph-volume as well
37 xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
38 xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
39 centos7-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
40 centos7-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
42 git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
43 # XXX Ideally we should be able to consume the requirements for ceph-ansible directly,
44 # but the master branch doesn't pin dependencies so we can't guarantee to work correctly
45 #pip install -r {envdir}/tmp/ceph-ansible/requirements.txt
47 vagrant up --no-provision {posargs:--provider=virtualbox}
48 bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
50 # create logical volumes to test with on the vms
51 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml
53 # ad-hoc/local test setup for lvm
54 ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
56 # use ceph-ansible to deploy a ceph cluster on the vms
57 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
59 # prepare nodes for testing with testinfra
60 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
62 # test cluster state using ceph-ansible tests
63 testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
65 # reboot all vms - attempt
66 bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
68 # retest to ensure cluster came back up correctly after rebooting
69 testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
71 # destroy an OSD, zap its device and recreate it using its ID
72 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
74 # retest to ensure cluster came back up correctly
75 testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
77 vagrant destroy --force