#!/usr/bin/env bash

set -eEx

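# Spins up a cephadm-managed Ceph cluster on kcli VMs for the dashboard CI environment.
# Example invocation (both flags are optional):
#
#   ./start-cluster.sh --dev-mode --expanded
#
# --dev-mode keeps the frontend build watching for source changes; --expanded passes
# expanded_cluster=true to the kcli plan defined in ceph_cluster.yml.

# Under Jenkins, tear down the kcli plan, the ceph-dashboard network and stale containers.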
cleanup() {
    set +x
    if [[ -n "$JENKINS_HOME" ]]; then
        echo "Starting cleanup..."
        kcli delete plan -y ceph || true
        kcli delete network ceph-dashboard -y
        docker container prune -f
        echo "Cleanup completed."
    fi
}

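# ERR handler: collect the active MGR's cephadm log plus cloud-init and container
# logs from each of the ceph-node-00..03 VMs into ${CEPH_DEV_FOLDER}/logs before failing.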
on_error() {
    set +x
    if [ "$1" != "0" ]; then
        echo "ERROR $1 thrown on line $2"
        echo
        echo "Collecting info..."
        echo
        echo "Saving MGR logs:"
        echo
        mkdir -p ${CEPH_DEV_FOLDER}/logs
        kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log
        for vm_id in {0..3}
        do
            local vm="ceph-node-0${vm_id}"
            echo "Saving journalctl from VM ${vm}:"
            echo
            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true
            echo "Saving container logs:"
            echo
            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true
        done
        echo "TEST FAILED."
    fi
}

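# Report failures (with exit code and offending line number) via on_error; always run cleanup on exit.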
trap 'on_error $? $LINENO' ERR
trap 'cleanup $? $LINENO' EXIT

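# Drop stale SSH host keys for the ceph-node-* VMs left over from previous runs.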
sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts

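# CEPH_DEV_FOLDER must point at the Ceph source checkout; it defaults to the current directory.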
: ${CEPH_DEV_FOLDER:=${PWD}}
EXTRA_PARAMS=''
DEV_MODE=''
# Check script args/options.
for arg in "$@"; do
    shift
    case "$arg" in
        "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
        "--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
    esac
done

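# Remove any leftover "ceph" plan from a previous run before provisioning a new one.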
kcli delete plan -y ceph || true

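# The frontend build below is kicked off in the background ("&") so the VM provisioning
# further down can run in parallel; with --dev-mode it keeps watching for source changes.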
# Build dashboard frontend (required to start the module).
cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
export NG_CLI_ANALYTICS=false
if [[ -n "$JENKINS_HOME" ]]; then
    npm cache clean --force
fi
npm ci
FRONTEND_BUILD_OPTS='-- --prod'
if [[ -n "${DEV_MODE}" ]]; then
    FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
fi
npm run build ${FRONTEND_BUILD_OPTS} &

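# Fetch the base cloud image (override via VM_IMAGE / VM_IMAGE_URL) and create the kcli
# plan "ceph" from ceph_cluster.yml, passing the source folder and any extra parameters.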
cd ${CEPH_DEV_FOLDER}
: ${VM_IMAGE:='fedora34'}
: ${VM_IMAGE_URL:='https://fedora.mirror.liteserver.nl/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.qcow2'}
kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
kcli delete plan -y ceph || true
kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
    -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
    ${EXTRA_PARAMS} ceph

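# Poll ceph-node-00 until cloud-init logs "kcli boot finished", sleeping DASHBOARD_CHECK_INTERVAL
# seconds between checks; set CLUSTER_DEBUG to a non-zero value to also dump podman state each round.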
: ${CLUSTER_DEBUG:=0}
: ${DASHBOARD_CHECK_INTERVAL:=10}
while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "kcli boot finished") ]]; do
    sleep ${DASHBOARD_CHECK_INTERVAL}
    kcli list vm
    if [[ ${CLUSTER_DEBUG} != 0 ]]; then
        kcli ssh -u root -- ceph-node-00 'podman ps -a'
        kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
    fi
    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
done