[tox]
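# environment names follow distro-objectstore-workflow; the matrix below
# expands to 12 combinations, and a single one can be run with e.g.:
#   tox -e centos7-bluestore-create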
envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
skipsdist = True

[testenv]
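# non-virtualenv commands that the test runs below are allowed to call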
whitelist_externals =
    vagrant
    bash
    git
passenv=*
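# ansible and vagrant both operate out of the per-scenario directory;
# CEPH_VOLUME_DEBUG=1 turns on ceph-volume's debug-level output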
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_STDOUT_CALLBACK = debug
  ANSIBLE_RETRY_FILES_ENABLED = False
  ANSIBLE_SSH_RETRIES = 5
  VAGRANT_CWD = {changedir}
  CEPH_VOLUME_DEBUG = 1
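# versions are pinned so runs stay reproducible; pytest-xdist provides the
# -n flag used by the testinfra invocations below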
deps=
  ansible==2.4.1
  testinfra==1.7.1
  pytest-xdist
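# each environment runs from the scenario directory for its combination,
# which holds the Vagrantfile, hosts inventory, and playbooks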
changedir=
  # plain/unencrypted
  centos7-filestore-create: {toxinidir}/centos7/filestore/create
  centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
  xenial-filestore-create: {toxinidir}/xenial/filestore/create
  xenial-bluestore-create: {toxinidir}/xenial/bluestore/create
  # dmcrypt
  centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
  centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
  xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt
  xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt
  # TODO: these are placeholders for now, eventually we want to
  # test the prepare/activate workflow of ceph-volume as well
  xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
  xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
  centos7-filestore-prepare_activate: {toxinidir}/centos7/filestore/prepare_activate
  centos7-bluestore-prepare_activate: {toxinidir}/centos7/bluestore/prepare_activate
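# the commands below cover the full scenario lifecycle: boot vms, deploy a
# cluster with ceph-ansible, validate it with testinfra, reboot and
# revalidate, exercise OSD destroy/recreate, then tear everything down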
commands=
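  # fetch ceph-ansible, which provides the deployment playbooks and the
  # testinfra tests used below; CEPH_ANSIBLE_BRANCH selects the branch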
  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible

  vagrant up --no-provision {posargs:--provider=virtualbox}
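  # generate the ssh config file that ANSIBLE_SSH_ARGS points ansible at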
  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

  # create logical volumes to test with on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml

  # ad-hoc/local test setup for lvm
  ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml

  # use ceph-ansible to deploy a ceph cluster on the vms
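  # (CEPH_DEV_BRANCH and CEPH_DEV_SHA1 select the ceph dev build to install)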
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"

  # prepare nodes for testing with testinfra
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

  # test cluster state using ceph-ansible tests
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

  # reboot all vms
  vagrant reload --no-provision

  # retest to ensure cluster came back up correctly after rebooting
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

  # destroy an OSD, zap its device and recreate it using its ID
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

  # retest to ensure cluster came back up correctly
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
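  # always tear the vms back down at the end of the run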
  vagrant destroy --force