[tox]
envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
skipsdist = True

[testenv]
whitelist_externals =
    vagrant
    bash
    git
passenv=*
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
  ANSIBLE_STDOUT_CALLBACK = debug
  ANSIBLE_RETRY_FILES_ENABLED = False
  ANSIBLE_SSH_RETRIES = 5
  VAGRANT_CWD = {changedir}
  CEPH_VOLUME_DEBUG = 1
deps=
  ansible==2.4.1
  testinfra==1.7.1
  pytest-xdist
  notario>=0.0.13
changedir=
  # plain/unencrypted
  centos7-filestore-create: {toxinidir}/centos7/filestore/create
  centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
  xenial-filestore-create: {toxinidir}/xenial/filestore/create
  xenial-bluestore-create: {toxinidir}/xenial/bluestore/create
  # dmcrypt
  centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
  centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
  xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt
  xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt
  # TODO: these are placeholders for now, eventually we want to
  # test the prepare/activate workflow of ceph-volume as well
  xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
  xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
  # NOTE(review): these previously pointed at the xenial directories
  # (copy-paste error) — fixed to centos7; confirm the centos7
  # prepare_activate scenario directories exist before running
  centos7-filestore-prepare_activate: {toxinidir}/centos7/filestore/prepare_activate
  centos7-bluestore-prepare_activate: {toxinidir}/centos7/bluestore/prepare_activate
commands=
  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
  # XXX Ideally we should be able to consume the requirements for ceph-ansible directly,
  # but the master branch doesn't pin dependencies so we can't guarantee to work correctly
  #pip install -r {envdir}/tmp/ceph-ansible/requirements.txt

  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

  # create logical volumes to test with on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml

  # ad-hoc/local test setup for lvm
  ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml

  # use ceph-ansible to deploy a ceph cluster on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"

  # prepare nodes for testing with testinfra
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

  # test cluster state using ceph-ansible tests
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

  # reboot all vms - attempt
  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

  # retest to ensure cluster came back up correctly after rebooting
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

  # destroy an OSD, zap its device and recreate it using its ID
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

  # retest to ensure cluster came back up correctly
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

  vagrant destroy --force