#!/usr/bin/env bash

# -e: abort on the first failing command (the ERR trap below collects
#     diagnostics first); -x: trace every command for the CI log.
set -ex
# Tear down CI resources (kcli VM plan and stale containers) on exit.
# Only acts inside Jenkins ($JENKINS_HOME set); local runs keep their
# cluster around for inspection.
cleanup() {
    set +x
    if [[ -z "$JENKINS_HOME" ]]; then
        return 0
    fi
    printf "\n\nStarting cleanup...\n\n"
    kcli delete plan -y ceph || true
    docker container prune -f
    printf "\n\nCleanup completed.\n\n"
}
14 | ||
# ERR-trap handler: dump diagnostics when a command fails.
# Arguments:
#   $1 - exit status of the failing command
#   $2 - line number it failed on
# Outputs mgr logs, cloud-init journals and container logs from each VM.
on_error() {
    set +x
    if [[ "$1" != "0" ]]; then
        # Pass dynamic values as printf ARGUMENTS, never as the format
        # string (a '%' in the value would otherwise be interpreted).
        printf '\n\nERROR %s thrown on line %s\n\n' "$1" "$2"
        printf '\n\nCollecting info...\n\n'
        printf '\n\nDisplaying MGR logs:\n\n'
        kcli ssh -u root -- ceph-node-00 'cephadm logs -n $(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1)'
        local vm_id vm
        for vm_id in 0 1 2
        do
            vm="ceph-node-0${vm_id}"
            printf '\n\nDisplaying journalctl from VM %s:\n\n' "${vm}"
            # Best-effort: a VM that never came up must not abort the dump.
            kcli ssh -u root -- "${vm}" 'journalctl --no-tail --no-pager -t cloud-init' || true
            printf '\n\nEnd of journalctl from VM %s\n\n' "${vm}"
            printf '\n\nDisplaying container logs:\n\n'
            kcli ssh -u root -- "${vm}" 'podman logs --names --since 30s $(podman ps -aq)' || true
        done
        printf '\n\nTEST FAILED.\n\n'
    fi
}
34 | ||
# Collect diagnostics on any command failure; clean up on every exit path.
# NOTE(review): cleanup() takes no parameters — $? and $LINENO are passed
# only for symmetry with on_error and are ignored.
trap 'on_error $? $LINENO' ERR
trap 'cleanup $? $LINENO' EXIT
37 | ||
# Drop stale SSH host keys of previous ceph-node-* VMs (re-created VMs get
# new keys, which would fail strict host-key checking). Guarded: on a
# fresh machine known_hosts may not exist, and an unguarded 'sed -i'
# failure would abort the whole script under 'set -e'.
if [[ -f "$HOME/.ssh/known_hosts" ]]; then
    sed -i '/ceph-node-/d' "$HOME/.ssh/known_hosts"
fi

# Default the ceph checkout location to the current directory.
: "${CEPH_DEV_FOLDER:=${PWD}}"
EXTRA_PARAMS=''
DEV_MODE=''
# Check script args/options.
for arg in "$@"; do
  shift
  case "$arg" in
    "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
    "--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
  esac
done
51 | ||
kcli delete plan -y ceph || true

# Build dashboard frontend (required to start the module).
cd "${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend"
export NG_CLI_ANALYTICS=false
# Jenkins workspaces can carry a corrupted npm cache between runs.
if [[ -n "$JENKINS_HOME" ]]; then
    npm cache clean --force
fi
npm ci
# Collect build options in an array so each option stays a single word
# (string concatenation + unquoted expansion is splitting-fragile).
FRONTEND_BUILD_OPTS=(-- --prod)
if [[ -n "${DEV_MODE}" ]]; then
    FRONTEND_BUILD_OPTS+=(--deleteOutputPath=false --watch)
fi
# Background the build: with '--watch' it never returns, and the VM
# provisioning below can proceed in parallel either way.
npm run build "${FRONTEND_BUILD_OPTS[@]}" &
66 | ||
cd "${CEPH_DEV_FOLDER}"
# Base image for the kcli-provisioned VMs (overridable via environment).
: "${VM_IMAGE:=fedora34}"
: "${VM_IMAGE_URL:=https://fedora.mirror.liteserver.nl/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.qcow2}"
kcli download image -p ceph-dashboard -u "${VM_IMAGE_URL}" "${VM_IMAGE}"
kcli delete plan -y ceph || true
# ${EXTRA_PARAMS} is deliberately unquoted: it holds zero or more
# whitespace-separated '-P key=value' pairs that must word-split.
kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
    -P ceph_dev_folder="${CEPH_DEV_FOLDER}" \
    ${EXTRA_PARAMS} ceph
75 | ||
: "${CLUSTER_DEBUG:=0}"
: "${DASHBOARD_CHECK_INTERVAL:=10}"
# Poll the bootstrap node until cloud-init reports the dashboard is up.
# 'grep -q' replaces the [[ -z $(… | grep …) ]] capture: same loop exit
# condition, but the full journal is no longer captured (and echoed into
# the 'set -x' trace) on every iteration.
until kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep -q "Dashboard is now available"; do
    sleep "${DASHBOARD_CHECK_INTERVAL}"
    kcli list vm
    if [[ ${CLUSTER_DEBUG} != 0 ]]; then
        kcli ssh -u root -- ceph-node-00 'podman ps -a'
        kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
    fi
    # Show recent cloud-init progress on every poll so CI logs stay useful.
    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
done