#!/usr/bin/env bash

set -eEx
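
# Starts a cephadm-based Ceph cluster in kcli VMs for dashboard CI: builds the
# dashboard frontend from this checkout and provisions the VMs defined in
# ceph_cluster.yml.
# Usage (flags as parsed below): ./start-cluster.sh [--dev-mode] [--expanded]
# Tunables (all defaulted below): CEPH_DEV_FOLDER, VM_IMAGE, VM_IMAGE_URL,
# CLUSTER_DEBUG, DASHBOARD_CHECK_INTERVAL.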
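
# Tear down the kcli plan, network and pool plus leftover containers.
# Only runs under Jenkins (JENKINS_HOME set); local clusters are left untouched.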
cleanup() {
    set +x
    if [[ -n "$JENKINS_HOME" ]]; then
        echo "Starting cleanup..."
        kcli delete plan -y ceph || true
        kcli delete network ceph-dashboard -y
        kcli delete pool ceph-dashboard -y
        rm -rf ${HOME}/.kcli
        docker container prune -f
        echo "Cleanup completed."
    fi
}
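
# Error handler: dump the active mgr's cephadm logs plus the cloud-init journal
# and container logs from every VM into ${CEPH_DEV_FOLDER}/logs for debugging.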
on_error() {
    set +x
    if [ "$1" != "0" ]; then
        echo "ERROR $1 thrown on line $2"
        echo
        echo "Collecting info..."
        echo
        echo "Saving MGR logs:"
        echo
        mkdir -p ${CEPH_DEV_FOLDER}/logs
        kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log
        for vm_id in {0..3}
        do
            local vm="ceph-node-0${vm_id}"
            echo "Saving journalctl from VM ${vm}:"
            echo
            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true
            echo "Saving container logs:"
            echo
            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true
        done
        echo "TEST FAILED."
    fi
}
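
# Collect logs on any error; always attempt cleanup when the script exits.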
trap 'on_error $? $LINENO' ERR
trap 'cleanup $? $LINENO' EXIT
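
# Remove stale SSH host keys left over from previous ceph-node-* VMs.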
sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts || true
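
# CEPH_DEV_FOLDER defaults to the current checkout. --dev-mode and --expanded
# are forwarded to the kcli plan as the dev_mode and expanded_cluster
# parameters of ceph_cluster.yml.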
: ${CEPH_DEV_FOLDER:=${PWD}}
EXTRA_PARAMS=''
DEV_MODE=''
# Check script args/options.
for arg in "$@"; do
    shift
    case "$arg" in
        "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
        "--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
    esac
done

kcli delete plan -y ceph || true

# Build dashboard frontend (required to start the module).
cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
export NG_CLI_ANALYTICS=false
if [[ -n "$JENKINS_HOME" ]]; then
    npm cache clean --force
fi
npm ci
FRONTEND_BUILD_OPTS='-- --prod'
if [[ -n "${DEV_MODE}" ]]; then
    FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
fi
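# Run the build in the background so VM provisioning below can proceed in
# parallel; with --dev-mode the watch build keeps rebuilding on source changes.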
npm run build ${FRONTEND_BUILD_OPTS} &

cd ${CEPH_DEV_FOLDER}
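# Base cloud image for the VMs; override VM_IMAGE and VM_IMAGE_URL to run
# against a different image.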
: ${VM_IMAGE:='fedora36'}
: ${VM_IMAGE_URL:='https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2'}
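# Fetch the image into the ceph-dashboard pool, drop any previous plan and
# create the cluster from ceph_cluster.yml, passing this checkout and the
# extra parameters collected above.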
kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
kcli delete plan -y ceph || true
kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
    -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
    ${EXTRA_PARAMS} ceph
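
# Wait for the bootstrap node: poll cloud-init output on ceph-node-00 every
# DASHBOARD_CHECK_INTERVAL seconds until it logs "kcli boot finished" (assumed
# to be emitted by the plan's cloud-init scripts). Set CLUSTER_DEBUG=1 to also
# dump container state while waiting.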
: ${CLUSTER_DEBUG:=0}
: ${DASHBOARD_CHECK_INTERVAL:=10}
while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "kcli boot finished") ]]; do
    sleep ${DASHBOARD_CHECK_INTERVAL}
    kcli list vm
    if [[ ${CLUSTER_DEBUG} != 0 ]]; then
        kcli ssh -u root -- ceph-node-00 'podman ps -a'
        kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
    fi
    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
done