# ceph/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
1 #!/usr/bin/env bash
2
3 set -eEx
4
# Tear down CI resources: the kcli plan (VMs), the dashboard network and
# stopped containers. Runs from the EXIT trap; it only acts when running
# under Jenkins ($JENKINS_HOME set) so local developer clusters survive.
# Each step is best-effort so one failed teardown does not abort the rest.
cleanup() {
  set +x
  if [[ -n "$JENKINS_HOME" ]]; then
    echo "Starting cleanup..."
    kcli delete plan -y ceph || true
    # '|| true' added: under 'set -e' a failed network delete would abort
    # the EXIT trap before the container prune below could run.
    kcli delete network ceph-dashboard -y || true
    docker container prune -f
    echo "Cleanup completed."
  fi
}

# ERR-trap handler: report the failing exit status and line, then collect
# diagnostics (MGR cephadm log, per-VM cloud-init journal and container
# logs) into ${CEPH_DEV_FOLDER}/logs before the script dies.
# Arguments: $1 - exit status, $2 - line number (both supplied by the trap).
on_error() {
  set +x
  if [ "$1" != "0" ]; then
    echo "ERROR $1 thrown on line $2"
    echo
    echo "Collecting info..."
    echo
    echo "Saving MGR logs:"
    echo
    # Local paths are quoted (SC2086); the remote command strings are left
    # untouched — their \$(...) must expand on the VM, not here.
    mkdir -p "${CEPH_DEV_FOLDER}/logs"
    kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > "${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log"
    for vm_id in {0..3}
    do
      local vm="ceph-node-0${vm_id}"
      echo "Saving journalctl from VM ${vm}:"
      echo
      kcli ssh -u root -- "${vm}" 'journalctl --no-tail --no-pager -t cloud-init' > "${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log" || true
      echo "Saving container logs:"
      echo
      kcli ssh -u root -- "${vm}" 'podman logs --names --since 30s \$(podman ps -aq)' > "${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log" || true
    done
    echo "TEST FAILED."
  fi
}

# Report errors with their line number, and always clean up on exit.
# (cleanup takes no arguments, so none are passed.)
trap 'on_error $? $LINENO' ERR
trap cleanup EXIT

# Drop stale SSH host keys for the CI VMs. The file may not exist yet on a
# fresh runner, and 'sed -i' on a missing file would kill us under 'set -e'.
if [[ -f "$HOME/.ssh/known_hosts" ]]; then
  sed -i '/ceph-node-/d' "$HOME/.ssh/known_hosts"
fi

# Default the dev folder to the current checkout unless the caller set it.
: "${CEPH_DEV_FOLDER:=${PWD}}"
EXTRA_PARAMS=''
DEV_MODE=''
# Consume the script options, translating each into kcli plan parameters.
# Unknown arguments are silently ignored; all positional args are consumed.
while [[ $# -gt 0 ]]; do
  case "$1" in
    "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
    "--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
  esac
  shift
done

# Remove any leftover cluster from a previous run (best-effort).
kcli delete plan -y ceph || true

# Build dashboard frontend (required to start the module).
cd "${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend"
export NG_CLI_ANALYTICS=false
if [[ -n "$JENKINS_HOME" ]]; then
  npm cache clean --force
fi
npm ci
# Build options kept in an array: safer than word-splitting a flat string
# if an option ever contains spaces (SC2086).
frontend_build_opts=(-- --prod)
if [[ -n "${DEV_MODE}" ]]; then
  # Dev mode: keep rebuilding on change and don't wipe the dist dir.
  frontend_build_opts+=(--deleteOutputPath=false --watch)
fi
# Backgrounded on purpose: in dev mode '--watch' never returns, and the
# cluster provisioning below can proceed while the frontend builds.
npm run build "${frontend_build_opts[@]}" &

cd "${CEPH_DEV_FOLDER}"
# Base VM image for the cluster nodes; both are overridable via env.
: "${VM_IMAGE:=fedora34}"
: "${VM_IMAGE_URL:=https://fedora.mirror.liteserver.nl/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.qcow2}"
kcli download image -p ceph-dashboard -u "${VM_IMAGE_URL}" "${VM_IMAGE}"
kcli delete plan -y ceph || true
# EXTRA_PARAMS is intentionally unquoted: it holds zero or more
# '-P key=value' pairs that must word-split into separate arguments.
kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
    -P ceph_dev_folder="${CEPH_DEV_FOLDER}" \
    ${EXTRA_PARAMS} ceph

: "${CLUSTER_DEBUG:=0}"
: "${DASHBOARD_CHECK_INTERVAL:=10}"
# Poll the bootstrap node until cloud-init reports that provisioning is
# done. 'until ... | grep -q' replaces the [[ -z $(... | grep ...) ]]
# idiom; a failed ssh simply means "not ready yet" and we keep polling.
until kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep -q "kcli boot finished"; do
  sleep "${DASHBOARD_CHECK_INTERVAL}"
  kcli list vm
  if [[ ${CLUSTER_DEBUG} != 0 ]]; then
    kcli ssh -u root -- ceph-node-00 'podman ps -a'
    kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
  fi
  # Show recent cloud-init output so CI logs reveal boot progress.
  kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
done