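# ceph-volume functional test matrix: every env name is a
# <distro>-<objectstore>-<scenario> combination expanded from envlist below,
# e.g. `tox -e centos7-bluestore-create`. Arguments passed after `--` end up
# in {posargs} and are forwarded to the vagrant scripts (the provider
# defaults to virtualbox).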
[tox]
envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
skipsdist = True

[testenv]
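# non-python tools that the commands below are allowed to invoke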
whitelist_externals =
    vagrant
    bash
    git
    cp
    sleep
passenv=*
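# environment shared by every scenario: ansible behaviour, the vagrant
# working directory, and ceph-volume debug output inside the VMs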
setenv=
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
  ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
  ANSIBLE_STDOUT_CALLBACK = debug
  ANSIBLE_RETRY_FILES_ENABLED = False
  ANSIBLE_SSH_RETRIES = 5
  VAGRANT_CWD = {changedir}
  CEPH_VOLUME_DEBUG = 1
  DEBIAN_FRONTEND=noninteractive
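# each env runs from the scenario directory holding its Vagrantfile, hosts
# inventory, and setup/test playbooks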
changedir=
  # plain/unencrypted
  centos7-filestore-create: {toxinidir}/centos7/filestore/create
  centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
  xenial-filestore-create: {toxinidir}/xenial/filestore/create
  xenial-bluestore-create: {toxinidir}/xenial/bluestore/create
  # dmcrypt
  centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
  centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
  xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt
  xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt
  # TODO: these are placeholders for now, eventually we want to
  # test the prepare/activate workflow of ceph-volume as well
  xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
  xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
  centos7-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
  centos7-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
commands=
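  # fetch ceph-ansible (branch selectable via CEPH_ANSIBLE_BRANCH) and install its test dependencies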
  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
  pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt

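  # bring up the vagrant VMs and write the ssh config that ansible and testinfra reuse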
  bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

  # create logical volumes to test with on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml

  # ad-hoc/local test setup for lvm
  ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml

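  # stage the local deploy playbook inside the ceph-ansible checkout so it can use ceph-ansible's roles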
  cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible

  # use ceph-ansible to deploy a ceph cluster on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"

  # prepare nodes for testing with testinfra
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

  # test cluster state using testinfra
  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # reboot all vms to check that the cluster survives a restart
  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

  # after a reboot, OSDs may take about 20 seconds to come back up
  sleep 30

  # retest to ensure the cluster came back up correctly after rebooting
  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # destroy an OSD, zap its device, and recreate it using its ID
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

  # retest to ensure the cluster came back up correctly
  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

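  # tear the VMs down once all checks have passed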
  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}