]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
import quincy beta 17.1.0
[ceph.git] / ceph / src / pybind / mgr / dashboard / ci / cephadm / start-cluster.sh
1 #!/usr/bin/env bash
2
3 set -eEx
4
#######################################
# EXIT-trap handler: tear down CI resources.
# Only acts when running under Jenkins (JENKINS_HOME set), so a local
# developer cluster is left running for inspection.
# Globals:   JENKINS_HOME (read)
# Arguments: the trap passes $? and $LINENO, but they are unused here.
#######################################
cleanup() {
    set +x
    if [[ -n "$JENKINS_HOME" ]]; then
        printf "\n\nStarting cleanup...\n\n"
        # Best-effort deletes: this runs under 'set -e', so without the
        # '|| true' guards a failed delete (e.g. resource already gone)
        # would skip the remaining cleanup steps and clobber the script's
        # exit status.
        kcli delete plan -y ceph || true
        kcli delete network ceph-dashboard -y || true
        docker container prune -f || true
        printf "\n\nCleanup completed.\n\n"
    fi
}
15
#######################################
# ERR-trap handler: collect diagnostics when a command fails.
# Arguments:
#   $1 - exit status of the failed command
#   $2 - line number it failed on
#######################################
on_error() {
    set +x
    if [ "$1" != "0" ]; then
        printf "\n\nERROR $1 thrown on line $2\n\n"
        printf "\n\nCollecting info...\n\n"
        printf "\n\nDisplaying MGR logs:\n\n"
        # '\$(...)' is deliberately escaped so the substitution runs on the
        # remote host, not locally. '|| true' so a failure fetching MGR logs
        # still lets the per-VM diagnostics below be collected.
        kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' || true
        for vm_id in 0 1 2
        do
            local vm="ceph-node-0${vm_id}"
            printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
            printf "\n\nEnd of journalctl from VM ${vm}\n\n"
            printf "\n\nDisplaying container logs:\n\n"
            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' || true
        done
        printf "\n\nTEST FAILED.\n\n"
    fi
}
35
# Install handlers: dump diagnostics on any failed command, clean up on exit.
trap 'on_error $? $LINENO' ERR
trap 'cleanup $? $LINENO' EXIT

# Drop stale VM host keys left over from previous runs. Guard on existence:
# 'sed -i' fails on a nonexistent file, which would abort the whole script
# under 'set -e' on a fresh host that has no known_hosts yet.
if [[ -f "$HOME/.ssh/known_hosts" ]]; then
    sed -i '/ceph-node-/d' "$HOME/.ssh/known_hosts"
fi
40
# Default the ceph checkout location to the current working directory.
: ${CEPH_DEV_FOLDER:=${PWD}}

# Parse command-line flags, accumulating extra kcli plan parameters.
# All positional arguments are consumed, exactly as the previous
# for/shift loop did.
EXTRA_PARAMS=''
DEV_MODE=''
while [ "$#" -gt 0 ]; do
    case "$1" in
        "--dev-mode")
            DEV_MODE='true'
            EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}"
            ;;
        "--expanded")
            EXTRA_PARAMS+=" -P expanded_cluster=true"
            ;;
    esac
    shift
done
52
# Remove any leftover cluster from a previous run; ignore "plan not found".
kcli delete plan -y ceph || true

# Build dashboard frontend (required to start the module).
cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
# Suppress the Angular CLI analytics prompt, which would block CI.
export NG_CLI_ANALYTICS=false
if [[ -n "$JENKINS_HOME" ]]; then
    # Jenkins workspaces can carry a stale/corrupted npm cache between runs.
    npm cache clean --force
fi
npm ci
FRONTEND_BUILD_OPTS='-- --prod'
if [[ -n "${DEV_MODE}" ]]; then
    # Dev mode: keep the existing build output and rebuild on file changes.
    FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
fi
# Run the (slow) frontend build in the background while the VMs come up.
npm run build ${FRONTEND_BUILD_OPTS} &
67
cd ${CEPH_DEV_FOLDER}
# Base cloud image for the kcli-provisioned cluster nodes (overridable via env).
: ${VM_IMAGE:='fedora34'}
: ${VM_IMAGE_URL:='https://fedora.mirror.liteserver.nl/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.qcow2'}
# Fetch the image into the ceph-dashboard pool.
kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
kcli delete plan -y ceph || true
# Create the cluster described in ceph_cluster.yml; EXTRA_PARAMS carries the
# optional dev_mode / expanded_cluster plan parameters set by the CLI flags.
kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
    -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
    ${EXTRA_PARAMS} ceph

: ${CLUSTER_DEBUG:=0}
: ${DASHBOARD_CHECK_INTERVAL:=10}
# Poll the bootstrap node until cloud-init logs the "kcli boot finished"
# marker, printing VM status (and container logs when CLUSTER_DEBUG is set)
# on every iteration so CI logs show progress.
while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "kcli boot finished") ]]; do
    sleep ${DASHBOARD_CHECK_INTERVAL}
    kcli list vm
    if [[ ${CLUSTER_DEBUG} != 0 ]]; then
        kcli ssh -u root -- ceph-node-00 'podman ps -a'
        # '\$' is expanded on the VM so podman substitutes container IDs there.
        kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
    fi
    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
done