]> git.proxmox.com Git - ceph.git/blame - ceph/src/vstart.sh
import ceph pacific 16.2.5
[ceph.git] / ceph / src / vstart.sh
CommitLineData
11fdf7f2 1#!/usr/bin/env bash
9f95a23c
TL
2# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
3# vim: softtabstop=4 shiftwidth=4 expandtab
7c673cae
FG
4
5# abort on failure
6set -e
7
9f95a23c
TL
quoted_print() {
    # Print the arguments as one shell-pasteable command line: arguments
    # containing a space are single-quoted, the rest are printed verbatim.
    for s in "$@"; do
        if [[ "$s" =~ \  ]]; then
            printf -- "'%s' " "$s"
        else
            # Use a literal format string: the old `printf -- "$s "` treated
            # the argument itself as the format, so '%' or '\' in an argument
            # was misinterpreted (e.g. "100%" printed garbage).
            printf -- '%s ' "$s"
        fi
    done
    printf '\n'
}
18
debug() {
    # Run the given command with stdout redirected to stderr, so that
    # informational chatter never pollutes the script's real stdout.
    "$@" 1>&2
}
22
prunb() {
    # Log the command line (with a trailing '&') to stderr, then launch the
    # command in the background.
    debug quoted_print "$@" '&'
    "$@" &
}
27
prun() {
    # Log the command line to stderr, then run the command in the foreground,
    # propagating its exit status.
    debug quoted_print "$@"
    "$@"
}
32
33
# When VSTART_DEST is set, keep the cluster's config/dev/out directories
# under that destination instead of the current working directory.
if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=$(dirname $0)
    SRC_PATH=$(cd $SRC_PATH; pwd)

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${PWD}/bin
    CEPH_LIB=${PWD}/lib

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/out
fi
47
11fdf7f2
TL
get_cmake_variable() {
    # Print the value of a CMake cache variable from ./CMakeCache.txt
    # (lines have the form NAME:TYPE=value).
    # Anchor the match so e.g. "WITH_RBD" does not also match
    # "EXTRA_WITH_RBD", and keep everything after the first '=' so values
    # that themselves contain '=' are not truncated.
    local variable=$1
    grep "^${variable}:" CMakeCache.txt | cut -d "=" -f 2-
}
52
# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    CEPH_BUILD_DIR=$(pwd)
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_ROOT/external/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
    # make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
    [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/cephfs-shell
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_DIR/external/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
    [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
fi

if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi

[ -z "$PYBIND" ] && PYBIND=./pybind

# Prefix PYTHONPATH with the in-tree pybind modules, the cython build
# products, and (when present) src/python-common.
[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH

export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
# Suppress logging for regular use that indicated that we are using a
# development version. vstart.sh is only used during testing and
# development
export CEPH_DEV=1

# Daemon counts can be supplied via the short env vars (MON=3 OSD=1 ...).
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"

# if none of the CEPH_NUM_* number is specified, kill the existing
# cluster.
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" -a \
     -z "$GANESHA_DAEMON_NUM" ]; then
    kill_all=1
else
    kill_all=0
fi

# Fall back to sane development defaults for anything still unset.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR

# Pool size is capped at 3 replicas, or at the OSD count when smaller.
if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
139
# Defaults for all command-line-tunable knobs; the option parser below
# overrides these.
extra_conf=""
new=0
standby=0
debug=0
ip=""
nodaemon=0
redirect=0
smallmds=0
short=0
ec=0
cephadm=0
parallel=true
hitset=""
overwrite_conf=0
cephx=1 #turn cephx on by default
gssapi_authx=0
cache=""
if [ $(uname) = FreeBSD ]; then
    objectstore="filestore"
else
    objectstore="bluestore"
fi
ceph_osd=ceph-osd
rgw_frontend="beast"
rgw_compression=""
lockdep=${LOCKDEP:-1}
spdk_enabled=0 #disable SPDK by default
zoned_enabled=0
io_uring_enabled=0
with_jaeger=0

# The dashboard can only be enabled when its frontend (and RBD) were built.
with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
   [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi

filestore_path=
kstore_path=
bluestore_dev=

VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
inc_osd_num=0

# messenger protocol: "21" = msgr2 + msgr1 (default), "2", or "1"
msgr="21"
# Build the help text one line at a time; the '\n'/'\t' escapes are expanded
# by usage_exit's printf.
usage="usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
usage=$usage"\t-i <ip>: bind to specific ip\n"
usage=$usage"\t-n, --new\n"
usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage=$usage"\t--redirect-output: only useful with nodaemon, directs output to log file\n"
usage=$usage"\t--smallmds: limit mds cache memory limit\n"
usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
usage=$usage"\t-k keep old configuration files (default)\n"
usage=$usage"\t-x enable cephx (on by default)\n"
usage=$usage"\t-X disable cephx\n"
usage=$usage"\t-g --gssapi enable Kerberos/GSSApi authentication\n"
usage=$usage"\t-G disable Kerberos/GSSApi authentication\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e : create an erasure pool\n";
usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
usage=$usage"\t--rgw_compression specify the rgw compression plugin\n"
usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend (default)\n"
usage=$usage"\t-f, --filestore use filestore as the osd objectstore backend\n"
usage=$usage"\t-K, --kstore use kstore as the osd objectstore backend\n"
usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
usage=$usage"\t--nolockdep disable lockdep\n"
usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
usage=$usage"\t--without-dashboard: do not run using mgr dashboard\n"
usage=$usage"\t--bluestore-spdk: enable SPDK and with a comma-delimited list of PCI-IDs of NVME device (e.g, 0000:81:00.0)\n"
usage=$usage"\t--msgr1: use msgr1 only\n"
usage=$usage"\t--msgr2: use msgr2 only\n"
usage=$usage"\t--msgr21: use msgr2 and msgr1\n"
usage=$usage"\t--crimson: use crimson-osd instead of ceph-osd\n"
usage=$usage"\t--osd-args: specify any extra osd specific options\n"
usage=$usage"\t--bluestore-devs: comma-separated list of blockdevs to use for bluestore\n"
usage=$usage"\t--bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)\n"
usage=$usage"\t--bluestore-io-uring: enable io_uring backend\n"
usage=$usage"\t--inc-osd: append some more osds into existing vcluster\n"
usage=$usage"\t--cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]\n"
usage=$usage"\t--no-parallel: dont start all OSDs in parallel\n"
usage=$usage"\t--jaeger: use jaegertracing for tracing\n"
usage_exit() {
    # Print the accumulated $usage text and exit (status 0).
    # '%b' expands the embedded '\n'/'\t' escapes exactly like the old bare
    # `printf "$usage"` did, but no longer treats '%' characters in the
    # usage text as printf format directives.
    printf '%b' "$usage"
    exit
}
246
# Parse command-line options.  Options that take a value consume it with an
# extra `shift` inside their case arm; the loop's trailing `shift` consumes
# the option word itself.
while [ $# -ge 1 ]; do
case $1 in
    -d | --debug)
        debug=1
        ;;
    -s | --standby_mds)
        standby=1
        ;;
    -l | --localhost)
        ip="127.0.0.1"
        ;;
    -i)
        [ -z "$2" ] && usage_exit
        ip="$2"
        shift
        ;;
    -e)
        ec=1
        ;;
    --new | -n)
        new=1
        ;;
    --inc-osd)
        # optional count argument; defaults to 1
        new=0
        kill_all=0
        inc_osd_num=$2
        if [ "$inc_osd_num" == "" ]; then
            inc_osd_num=1
        else
            shift
        fi
        ;;
    --short)
        short=1
        ;;
    --crimson)
        ceph_osd=crimson-osd
        ;;
    --osd-args)
        extra_osd_args="$2"
        shift
        ;;
    --msgr1)
        msgr="1"
        ;;
    --msgr2)
        msgr="2"
        ;;
    --msgr21)
        msgr="21"
        ;;
    --cephadm)
        cephadm=1
        ;;
    --no-parallel)
        parallel=false
        ;;
    --valgrind)
        [ -z "$2" ] && usage_exit
        valgrind=$2
        shift
        ;;
    --valgrind_args)
        valgrind_args="$2"
        shift
        ;;
    --valgrind_mds)
        [ -z "$2" ] && usage_exit
        valgrind_mds=$2
        shift
        ;;
    --valgrind_osd)
        [ -z "$2" ] && usage_exit
        valgrind_osd=$2
        shift
        ;;
    --valgrind_mon)
        [ -z "$2" ] && usage_exit
        valgrind_mon=$2
        shift
        ;;
    --valgrind_mgr)
        [ -z "$2" ] && usage_exit
        valgrind_mgr=$2
        shift
        ;;
    --valgrind_rgw)
        [ -z "$2" ] && usage_exit
        valgrind_rgw=$2
        shift
        ;;
    --nodaemon)
        nodaemon=1
        ;;
    --redirect-output)
        redirect=1
        ;;
    --smallmds)
        smallmds=1
        ;;
    --rgw_port)
        CEPH_RGW_PORT=$2
        shift
        ;;
    --rgw_frontend)
        rgw_frontend=$2
        shift
        ;;
    --rgw_compression)
        rgw_compression=$2
        shift
        ;;
    --kstore_path)
        kstore_path=$2
        shift
        ;;
    --filestore_path)
        filestore_path=$2
        shift
        ;;
    -m)
        [ -z "$2" ] && usage_exit
        MON_ADDR=$2
        shift
        ;;
    -x)
        cephx=1 # this is on be default, flag exists for historical consistency
        ;;
    -X)
        cephx=0
        ;;
    -g | --gssapi)
        gssapi_authx=1
        ;;
    -G)
        gssapi_authx=0
        ;;
    -k)
        if [ ! -r $conf_fn ]; then
            echo "cannot use old configuration: $conf_fn not readable." >&2
            exit
        fi
        new=0
        ;;
    --memstore)
        objectstore="memstore"
        ;;
    -b | --bluestore)
        objectstore="bluestore"
        ;;
    -f | --filestore)
        objectstore="filestore"
        ;;
    -K | --kstore)
        objectstore="kstore"
        ;;
    --hitset)
        hitset="$hitset $2 $3"
        shift
        shift
        ;;
    -o)
        extra_conf+=$'\n'"$2"
        shift
        ;;
    --cache)
        if [ -z "$cache" ]; then
            cache="$2"
        else
            cache="$cache $2"
        fi
        shift
        ;;
    --nolockdep)
        lockdep=0
        ;;
    --multimds)
        CEPH_MAX_MDS="$2"
        shift
        ;;
    --without-dashboard)
        with_mgr_dashboard=false
        ;;
    --bluestore-spdk)
        [ -z "$2" ] && usage_exit
        IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
        spdk_enabled=1
        shift
        ;;
    --bluestore-devs)
        IFS=',' read -r -a bluestore_dev <<< "$2"
        for dev in "${bluestore_dev[@]}"; do
            if [ ! -b $dev -o ! -w $dev ]; then
                echo "All --bluestore-devs must refer to writable block devices"
                exit 1
            fi
        done
        shift
        ;;
    --bluestore-zoned)
        zoned_enabled=1
        ;;
    --bluestore-io-uring)
        # This flag takes no argument.  The original code had a stray
        # `shift` here which, combined with the loop's trailing shift,
        # silently swallowed the *next* command-line option.
        io_uring_enabled=1
        ;;
    --jaeger)
        with_jaeger=1
        echo "with_jaeger $with_jaeger"
        ;;
    *)
        usage_exit
esac
shift
done
464
# Stop any running cluster when no daemon counts were requested at all.
if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi

if [ "$new" -eq 0 ]; then
    # Reusing an existing cluster: recover the asok dir and the daemon
    # counts recorded in the [client.vstart.sh] section of ceph.conf.
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=$(dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket))
    fi
    mkdir -p $CEPH_ASOK_DIR
    MON=$($CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null) && \
        CEPH_NUM_MON="$MON"
    OSD=$($CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null) && \
        CEPH_NUM_OSD="$OSD"
    MDS=$($CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null) && \
        CEPH_NUM_MDS="$MDS"
    MGR=$($CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null) && \
        CEPH_NUM_MGR="$MGR"
    RGW=$($CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null) && \
        CEPH_NUM_RGW="$RGW"
    NFS=$($CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null) && \
        GANESHA_DAEMON_NUM="$NFS"
else
    # only delete if -n
    if [ -e "$conf_fn" ]; then
        asok_dir=$(dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket))
        rm -- "$conf_fn"
        if [ $asok_dir != /var/run/ceph ]; then
            [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
        fi
    fi
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=$(mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX")
    fi
fi

ARGS="-c $conf_fn"
7c673cae
FG
run() {
    # run <type> <num> <command...>
    # Launch a daemon, optionally under valgrind, optionally wrapped by
    # ceph-run (--nodaemon), optionally with output redirected to a log.
    type=$1
    shift
    num=$1
    shift
    # Per-daemon-type valgrind override (e.g. $valgrind_osd), falling back
    # to the global $valgrind.  Indirect expansion replaces the old eval.
    local _valg_var="valgrind_$type"
    valg=${!_valg_var}
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    elif [ "$nodaemon" -eq 0 ]; then
        prun "$@"
    elif [ "$redirect" -eq 0 ]; then
        prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
    else
        ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
    fi
}
523
wconf() {
    # Append stdin to the cluster config file, but only when we are
    # generating a fresh config (-n) or explicitly overwriting it.
    if [ "$new" -eq 1 ] || [ "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}
529
do_rgw_conf() {
    # Emit one [client.rgw.<port>] section per requested RGW instance.
    if [ $CEPH_NUM_RGW -eq 0 ]; then
        return 0
    fi

    # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
    # individual rgw's ids will be their ports.
    current_port=$CEPH_RGW_PORT
    for ((n = 0; n < CEPH_NUM_RGW; n++)); do
        wconf << EOF
[client.rgw.${current_port}]
    rgw frontends = $rgw_frontend port=${current_port}
    admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
EOF
        current_port=$((current_port + 1))
    done
}
550
format_conf() {
    # Join multi-line config option text into a single string whose
    # continuation lines are prefixed with $pad, ready for embedding in a
    # conf-file heredoc.  Plain `read` also trims each line's surrounding
    # whitespace.
    # NOTE(review): indent width taken verbatim from source; the scrape may
    # have collapsed a wider run of spaces — confirm against upstream.
    local raw=$1
    local pad=" "
    local line
    local joined
    while read -r line; do
        if [ -n "$joined" ]; then
            joined+=$'\n'${pad}${line}
        else
            joined="${line}"
        fi
    done <<< "$raw"
    echo "$joined"
}
565
prepare_conf() {
    # Generate the initial ceph.conf via wconf (which is a no-op unless -n
    # or overwrite_conf is set): vstart bookkeeping section, [global],
    # auth settings, objectstore tuning and the per-daemon sections.
    local DAEMONOPTS="
        log file = $CEPH_OUT_DIR/\$name.log
        admin socket = $CEPH_ASOK_DIR/\$name.asok
        chdir = \"\"
        pid file = $CEPH_OUT_DIR/\$name.pid
        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    local mgr_modules="restful iostat"
    if $with_mgr_dashboard; then
        mgr_modules="dashboard $mgr_modules"
    fi

    # Messenger bindings; format_conf strips/re-applies the indentation.
    local msgr_conf=''
    if [ $msgr -eq 21 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = true"
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = false"
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="ms bind msgr2 = false
                   ms bind msgr1 = true"
    fi

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
        num mon = $CEPH_NUM_MON
        num osd = $CEPH_NUM_OSD
        num mds = $CEPH_NUM_MDS
        num mgr = $CEPH_NUM_MGR
        num rgw = $CEPH_NUM_RGW
        num ganesha = $GANESHA_DAEMON_NUM

[global]
        fsid = $(uuidgen)
        osd failsafe full ratio = .99
        mon osd full ratio = .99
        mon osd nearfull ratio = .99
        mon osd backfillfull ratio = .99
        mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
        erasure code dir = $EC_PATH
        plugin dir = $CEPH_LIB
        filestore fd cache size = 32
        run dir = $CEPH_OUT_DIR
        crash dir = $CEPH_OUT_DIR
        enable experimental unrecoverable data corrupting features = *
        osd_crush_chooseleaf_type = 0
        debug asok assert abort = true
        $(format_conf "${msgr_conf}")
        $(format_conf "${extra_conf}")
EOF
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
        lockdep = true
EOF
    fi
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = cephx
        auth service required = cephx
        auth client required = cephx
EOF
    elif [ "$gssapi_authx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = gss
        auth service required = gss
        auth client required = gss
        gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
    else
        wconf <<EOF
        auth cluster required = none
        auth service required = none
        auth client required = none
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT="        osd max object name len = 460
        osd max object namespace len = 64"
    fi
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ]; then
            BLUESTORE_OPTS="        bluestore_block_db_path = \"\"
        bluestore_block_db_size = 0
        bluestore_block_db_create = false
        bluestore_block_wal_path = \"\"
        bluestore_block_wal_size = 0
        bluestore_block_wal_create = false
        bluestore_spdk_mem = 2048"
        else
            BLUESTORE_OPTS="        bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
        bluestore block db size = 1073741824
        bluestore block db create = true
        bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
        bluestore block wal size = 1048576000
        bluestore block wal create = true"
        fi
        if [ "$zoned_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bluestore min alloc size = 65536
        bluestore prefer deferred size = 0
        bluestore prefer deferred size hdd = 0
        bluestore prefer deferred size ssd = 0
        bluestore allocator = zoned"
        fi
        if [ "$io_uring_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bdev ioring = true"
        fi
    fi
    wconf <<EOF
[client]
        keyring = $keyring_fn
        log file = $CEPH_OUT_DIR/\$name.\$pid.log
        admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok

        ; needed for s3tests
        rgw crypt s3 kms backend = testing
        rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        rgw crypt require ssl = false
        ; uncomment the following to set LC days as the value in seconds;
        ; needed for passing lc time based s3-tests (can be verbose)
        ; rgw lc debug interval = 10
        $(format_conf "${extra_conf}")
EOF
    do_rgw_conf
    wconf << EOF
[mds]
$DAEMONOPTS
        mds data = $CEPH_DEV_DIR/mds.\$id
        mds root ino uid = `id -u`
        mds root ino gid = `id -g`
        $(format_conf "${extra_conf}")
[mgr]
        mgr data = $CEPH_DEV_DIR/mgr.\$id
        mgr module path = $MGR_PYTHON_PATH
        cephadm path = $CEPH_ROOT/src/cephadm/cephadm
$DAEMONOPTS
        $(format_conf "${extra_conf}")
[osd]
$DAEMONOPTS
        osd_check_max_object_name_len_on_startup = false
        osd data = $CEPH_DEV_DIR/osd\$id
        osd journal = $CEPH_DEV_DIR/osd\$id/journal
        osd journal size = 100
        osd class tmp = out
        osd class dir = $OBJCLASS_PATH
        osd class load list = *
        osd class default list = *
        osd fast shutdown = false

        filestore wbthrottle xfs ios start flusher = 10
        filestore wbthrottle xfs ios hard limit = 20
        filestore wbthrottle xfs inodes hard limit = 30
        filestore wbthrottle btrfs ios start flusher = 10
        filestore wbthrottle btrfs ios hard limit = 20
        filestore wbthrottle btrfs inodes hard limit = 30
        bluestore fsck on mount = true
        bluestore block create = true
$BLUESTORE_OPTS

        ; kstore
        kstore fsck on mount = true
        osd objectstore = $objectstore
$COSDSHORT
        $(format_conf "${extra_conf}")
[mon]
        mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
        $(format_conf "${extra_conf}")
        mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
        auth allow insecure global id reclaim = false
EOF
}
747
write_logrotate_conf() {
    # Emit a logrotate config covering all vstart log files under ./out.
    out_dir=$(pwd)"/out/*.log"

    cat << EOF
$out_dir
{
    rotate 5
    size 1G
    copytruncate
    compress
    notifempty
    missingok
    sharedscripts
    postrotate
        # NOTE: assuring that the absence of one of the following processes
        # won't abort the logrotate command.
        killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
    endscript
}
EOF
}

init_logrotate() {
    # Create ./logrotate.conf on first run; also drop any stale state file
    # left over from a previous configuration.
    logrotate_conf_path=$(pwd)"/logrotate.conf"
    logrotate_state_path=$(pwd)"/logrotate.state"

    if ! test -a $logrotate_conf_path; then
        if test -a $logrotate_state_path; then
            rm -f $logrotate_state_path
        fi
        write_logrotate_conf > $logrotate_conf_path
    fi
}
781
start_mon() {
    # Build the list of monitor names (a, b, c, ...) up to $CEPH_NUM_MON.
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ]; then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        if [ `echo $IP | grep '^127\\.'` ]; then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo "  connect. either adjust /etc/hosts, or edit this script to use your"
            echo "  machine's real IP."
            echo
        fi

        # Keyring with mon. and client.admin credentials.
        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local params=()
        local count=0
        local mon_host=""
        for f in $MONS
        do
            if [ $msgr -eq 1 ]; then
                A="v1:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 2 ]; then
                A="v2:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 21 ]; then
                A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
            fi
            params+=("--addv" "$f" "$A")
            mon_host="$mon_host $A"
            wconf <<EOF
[mon.$f]
        host = $HOSTNAME
        mon data = $CEPH_DEV_DIR/mon.$f
EOF
            count=$(($count + 2))
        done
        wconf <<EOF
[global]
        mon host = $mon_host
EOF
        prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"

        # Wipe and mkfs each monitor's data directory.
        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done
}
859
start_osd() {
    # Create (for -n / --inc-osd) and launch the OSD daemons, either in
    # parallel (default) or sequentially (--no-parallel).
    if [ $inc_osd_num -gt 0 ]; then
        # Appending OSDs to an existing cluster: continue numbering from
        # the current max osd id.
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        start=$old_maxosd
        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf
    else
        start=0
        end=$(($CEPH_NUM_OSD-1))
    fi
    local osds_wait
    for osd in `seq $start $end`
    do
        local extra_seastar_args
        if [ "$ceph_osd" == "crimson-osd" ]; then
            # designate a single CPU node $osd for osd.$osd
            extra_seastar_args="--smp 1 --cpuset $osd"
            if [ "$debug" -ne 0 ]; then
                extra_seastar_args+=" --debug"
            fi
        fi
        if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
            wconf <<EOF
[osd.$osd]
        host = $HOSTNAME
EOF
            if [ "$spdk_enabled" -eq 1 ]; then
                wconf <<EOF
        bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
EOF
            fi

            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$filestore_path" ]; then
                ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
            elif [ -n "$kstore_path" ]; then
                ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
                if [ -n "${bluestore_dev[$osd]}" ]; then
                    dd if=/dev/zero of=${bluestore_dev[$osd]} bs=1M count=1
                    ln -s ${bluestore_dev[$osd]} $CEPH_DEV_DIR/osd$osd/block
                    wconf <<EOF
        bluestore fsck on mount = false
EOF
                fi
            fi

            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            cat > $key_fn<<EOF
[osd.$osd]
        key = $OSD_SECRET
EOF
        fi
        echo start osd.$osd
        local osd_pid
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &
        osd_pid=$!
        if $parallel; then
            # BUGFIX: accumulate every background pid.  The old plain
            # assignment (osds_wait=$osd_pid) overwrote the list each
            # iteration, so the wait loop below only waited on the last OSD.
            osds_wait="$osds_wait $osd_pid"
        else
            wait $osd_pid
        fi
    done
    if $parallel; then
        for p in $osds_wait; do
            wait $p
        done
        debug echo OSDs started
    fi
    if [ $inc_osd_num -gt 0 ]; then
        # update num osd
        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
    fi
}
949
start_mgr() {
    # Create (for -n) and launch the mgr daemons, configure dashboard /
    # prometheus / restful module ports, and optionally bootstrap the
    # cephadm orchestrator.
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

            wconf <<EOF
[mgr.$name]
        host = $HOSTNAME
EOF

            if $with_mgr_dashboard ; then
                local port_option="ssl_server_port"
                local http_proto="https"
                if [ "$ssl" == "0" ]; then
                    port_option="server_port"
                    http_proto="http"
                    ceph_adm config set mgr mgr/dashboard/ssl false --force
                fi
                ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
                if [ $mgr -eq 1 ]; then
                    DASH_URLS="$http_proto://$IP:$MGR_PORT"
                else
                    DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
                fi
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
            ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
            PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

            ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
        fi

        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            while ! ceph_adm -h | grep -c -q ^dashboard ; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
            printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
            ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
                administrator --force-password
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert; then
                    debug echo dashboard module not working correctly!
                fi
            fi
        fi

        while ! ceph_adm -h | grep -c -q ^restful ; do
            debug echo 'waiting for mgr restful module to start'
            sleep 1
        done
        if ceph_adm restful create-self-signed-cert; then
            SF=$(mktemp)
            ceph_adm restful create-key admin -o $SF
            RESTFUL_SECRET=$(cat $SF)
            rm $SF
        else
            debug echo MGR Restful is not working, perhaps the package is not installed?
        fi
    fi

    if [ "$cephadm" -eq 1 ]; then
        debug echo Enabling cephadm orchestrator
        if [ "$new" -eq 1 ]; then
            digest=$(curl -s \
                https://registry.hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
                | jq -r '.images[0].digest')
            ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
        fi
        ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
        ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
        ceph_adm mgr module enable cephadm
        ceph_adm orch set backend cephadm
        ceph_adm orch host add "$(hostname)"
        ceph_adm orch apply crash '*'
        ceph_adm config set mgr mgr/cephadm/allow_ptrace true
    fi
}
7c673cae 1052
11fdf7f2 1053start_mds() {
7c673cae
FG
1054 local mds=0
1055 for name in a b c d e f g h i j k l m n o p
1056 do
9f95a23c
TL
1057 [ $mds -eq $CEPH_NUM_MDS ] && break
1058 mds=$(($mds + 1))
7c673cae 1059
9f95a23c
TL
1060 if [ "$new" -eq 1 ]; then
1061 prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
1062 key_fn=$CEPH_DEV_DIR/mds.$name/keyring
1063 wconf <<EOF
7c673cae
FG
1064[mds.$name]
1065 host = $HOSTNAME
1066EOF
9f95a23c
TL
1067 if [ "$standby" -eq 1 ]; then
1068 mkdir -p $CEPH_DEV_DIR/mds.${name}s
1069 wconf <<EOF
1070 mds standby for rank = $mds
7c673cae
FG
1071[mds.${name}s]
1072 mds standby replay = true
1073 mds standby for name = ${name}
1074EOF
9f95a23c
TL
1075 fi
1076 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
1077 ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
1078 if [ "$standby" -eq 1 ]; then
1079 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
1080 "$CEPH_DEV_DIR/mds.${name}s/keyring"
1081 ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
1082 mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
1083 fi
1084 fi
7c673cae 1085
9f95a23c
TL
1086 run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
1087 if [ "$standby" -eq 1 ]; then
1088 run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
1089 fi
7c673cae
FG
1090
1091 #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
1092 #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
1093 #ceph_adm mds set max_mds 2
1094 done
11fdf7f2
TL
1095
1096 if [ $new -eq 1 ]; then
1097 if [ "$CEPH_NUM_FS" -gt "0" ] ; then
1098 sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
1099 if [ "$CEPH_NUM_FS" -gt "1" ] ; then
1100 ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
1101 fi
1102
9f95a23c
TL
1103 # wait for volume module to load
1104 while ! ceph_adm fs volume ls ; do sleep 1 ; done
11fdf7f2
TL
1105 local fs=0
1106 for name in a b c d e f g h i j k l m n o p
1107 do
9f95a23c
TL
1108 ceph_adm fs volume create ${name}
1109 ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
11fdf7f2
TL
1110 fs=$(($fs + 1))
1111 [ $fs -eq $CEPH_NUM_FS ] && break
1112 done
1113 fi
1114 fi
1115
7c673cae
FG
1116}
1117
# Ganesha Daemons requires nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace
# nfs-ganesha-rados-urls (version 3.3 and above) packages installed. On
# Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8
# the packages are available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha

start_ganesha() {
    # Start $GANESHA_DAEMON_NUM nfs-ganesha daemons exporting cephfs "a",
    # using the rados_cluster recovery backend and rados:// config URLs.
    cluster_id="vstart"
    GANESHA_PORT=$(($CEPH_PORT + 4000))
    local ganesha=0
    test_user="$cluster_id"
    pool_name="nfs-ganesha"
    namespace=$cluster_id
    url="rados://$pool_name/$namespace/conf-nfs.$test_user"

    # Client identity the ganesha daemons use to reach the cluster.
    prun ceph_adm auth get-or-create client.$test_user \
        mon "allow r" \
        osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
        mds "allow rw path=/" \
        >> "$keyring_fn"

    # The 'nfs' mgr commands below require an orchestrator backend; use the
    # dummy test_orchestrator for non-cephadm vstart clusters.
    ceph_adm mgr module enable test_orchestrator
    ceph_adm orch set backend test_orchestrator
    ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
    prun ceph_adm nfs cluster create $cluster_id
    prun ceph_adm nfs export create cephfs "a" $cluster_id "/cephfs"

    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break

        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir

        # Per-daemon ganesha config: NFSv4.1/4.2 only, rados_cluster
        # recovery, and the shared rados:// export URL.
        echo "NFS_CORE_PARAM {
            Enable_NLM = false;
            Enable_RQUOTA = false;
            Protocols = 4;
            NFS_Port = $port;
        }

        MDCACHE {
           Dir_Chunk = 0;
        }

        NFSv4 {
           RecoveryBackend = rados_cluster;
           Minor_Versions = 1, 2;
        }

        RADOS_KV {
           pool = $pool_name;
           namespace = $namespace;
           UserId = $test_user;
           nodeid = $name;
        }

        RADOS_URLS {
           Userid = $test_user;
           watch_url = '$url';
        }

        %url $url" > "$ganesha_dir/ganesha-$name.conf"
        wconf <<EOF
[ganesha.$name]
        host = $HOSTNAME
        ip = $IP
        port = $port
        ganesha data = $ganesha_dir
        pid file = $ganesha_dir/ganesha-$name.pid
EOF

        # Register this node in the grace DB, then dump it for visibility.
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG

        # Wait few seconds for grace period to be removed
        sleep 2

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        echo "$test_user ganesha daemon $name started on port: $port"
    done

    if $with_mgr_dashboard; then
        ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace "$cluster_id:$pool_name/$cluster_id"
    fi
}
1212
7c673cae
FG
# Pick mon debug verbosity for the generated conf.
if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
        debug mon = 10
        debug ms = 1'
else
    debug echo "** going verbose **"
    CMONDEBUG='
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1'
fi

# Explicit mon address overrides the default for all daemon types.
if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi

# Pick a random base port in [40000, 40999] that is not already listening.
if [ -z "$CEPH_PORT" ]; then
    while [ true ]
    do
        CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
        ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
    done
fi

[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"

# For a fresh (non-incremental-OSD) run, wipe old core dumps and output.
if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
fi

[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
if [ $inc_osd_num -eq 0 ]; then
    $SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*

# New cephx cluster: start from an empty keyring.
[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn


# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    # Prefer iproute2's "ip addr"; fall back to legacy ifconfig output.
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # filter out IPv4 and localhost addresses
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing left, try using localhost address, it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"


[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
1282
ceph_adm() {
    # Run the ceph admin CLI against this vstart cluster, forwarding all
    # arguments; attach the vstart keyring when cephx is enabled.
    local -a cli=("$CEPH_ADM" -c "$conf_fn")
    if [ "$cephx" -eq 1 ]; then
        cli+=(-k "$keyring_fn")
    fi
    prun $SUDO "${cli[@]}" "$@"
}
1290
9f95a23c
TL
# Incremental-OSD mode: just add OSDs to an already running cluster and exit.
if [ $inc_osd_num -gt 0 ]; then
    start_osd
    exit
fi

if [ "$new" -eq 1 ]; then
    prepare_conf
fi

if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon

    # Push baseline cluster settings into the mon config store.
    debug echo Populating config ...
    cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[global]
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1

[mon]
mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
mon_allow_pool_size_one = true

[osd]
osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288

[mds]
mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true

[mgr]
mgr/telemetry/nag = false
mgr/telemetry/enable = false

EOF

    # Optional verbose per-daemon debug levels.
    if [ "$debug" -ne 0 ]; then
        debug echo Setting debug configs ...
        cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[mgr]
debug_ms = 1
debug_mgr = 20
debug_monc = 20
debug_mon = 20

[osd]
debug_ms = 1
debug_osd = 25
debug_objecter = 20
debug_monc = 20
debug_mgrc = 20
debug_journal = 20
debug_filestore = 20
debug_bluestore = 20
debug_bluefs = 20
debug_rocksdb = 20
debug_bdev = 20
debug_reserver = 10
debug_objclass = 20

[mds]
debug_ms = 1
debug_mds = 20
debug_monc = 20
debug_mgrc = 20
mds_debug_scatterstat = true
mds_verify_scatter = true
EOF
    fi
    # cephadm needs a public_network; derive it from the route covering $IP.
    if [ "$cephadm" -gt 0 ]; then
        debug echo Setting mon public_network ...
        public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
        ceph_adm config set mon public_network $public_network
    fi
fi

if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi

# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi

# mds
if [ "$smallmds" -eq 1 ]; then
    wconf <<EOF
[mds]
        mds log max segments = 2
        # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
        mds cache memory limit = 100M
EOF
fi

if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
    # key with access to all FS
    ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
fi

# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
    fi
done

# mgr

# Optional demo erasure-coded pool (k=2, m=2).
if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec erasure ec-profile
EOF
fi

# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    pseudo_path="/cephfs"
    ceph_adm mgr module enable nfs
    # With cephadm the orchestrator deploys ganesha (standard port 2049);
    # otherwise run the daemons directly via start_ganesha.
    if [ "$cephadm" -gt 0 ]; then
        cluster_id="vstart"
        prun ceph_adm nfs cluster create $cluster_id
        prun ceph_adm nfs export create cephfs "a" $cluster_id $pseudo_path
        port="2049"
    else
        start_ganesha
        port="<ganesha-port-num>"
    fi
    echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
fi
1437
7c673cae
FG
do_cache() {
    # Put a writeback cache tier in front of every pool named on the
    # command line: create "<pool>-cache", add it as a tier of <pool>,
    # switch it to writeback mode and set it as the overlay.
    # NB: $p deliberately stays global, matching the rest of this script.
    for p in "$@"; do
        debug echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache
1452
do_hitsets() {
    # Enable hit-set tracking on pools.  Arguments come in pairs:
    # <pool-name> <hit_set_type> <pool-name> <hit_set_type> ...
    # NB: $pool/$type deliberately stay global, as elsewhere in this script.
    while [ -n "$*" ]; do
        pool="$1"
        type="$2"
        shift 2
        debug echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset
1468
do_rgw_create_users()
{
    # Create the canonical vstart RGW users: one system S3 user ("testid"),
    # the fixed users expected by the s3-tests suite, and a Swift subuser.
    # The access/secret keys are intentionally well-known test values.
    # Create S3 user
    local akey='0555b35654ad1656d804'
    local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com --system -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    # Tenanted user exercises multi-tenancy code paths in s3-tests.
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo "  access key:  $akey"
    echo "  secret key:  $skey"
    echo ""
    echo "Swift User Info:"
    echo "  account   : test"
    echo "  user      : tester"
    echo "  password  : testing"
    echo ""
}
7c673cae 1515
224ce89b
WB
do_rgw()
{
    # On a fresh cluster, create the default RGW users (and optional zone
    # compression), then start $CEPH_NUM_RGW radosgw instances on
    # consecutive ports starting at $CEPH_RGW_PORT.  A trailing 's' on
    # CEPH_RGW_PORT (e.g. "8000s") selects https frontends.
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            debug echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi
    # Start server
    if [ "$cephadm" -gt 0 ]; then
        # cephadm mode: let the orchestrator deploy the rgw service.
        ceph_adm orch apply rgw rgwTest
        return
    fi

    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    fi

    # Split an optional trailing 's' (https marker) off the port spec.
    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
    RGWSUDO=
    # Privileged ports need root to bind.
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo

    current_port=$CEPH_RGW_PORT
    # BUG FIX: 'i' was previously used below without initialization, so a
    # stale value leaking in from the surrounding script could terminate
    # the loop early and silently skip starting rgw instances.
    local i=0
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"

        # Per-instance cephx identity, appended to the vstart keyring.
        ceph_adm auth get-or-create $rgw_name \
            mon 'allow rw' \
            osd 'allow rwx' \
            mgr 'allow rw' \
            >> "$keyring_fn"

        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            --rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
            ${RGWDEBUG} \
            -n ${rgw_name} \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}"

        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break

        current_port=$((current_port+1))
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi
1575
f67539c2
TL
1576
docker_service(){
    # Run a container command ("$@") with whichever container runtime is
    # available, preferring podman over docker.  Removes a previously
    # exited 'jaeger' container before (re)creating it.
    local service=''
    #prefer podman
    if pgrep -f podman > /dev/null; then
        service="podman"
    elif pgrep -f docker > /dev/null; then
        service="docker"
    fi
    if [ -n "$service" ]; then
        echo "using $service for deploying jaeger..."
        #check for exited container, remove them and restart container
        if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then
            $service rm jaeger
        fi
        # BUG FIX: this check was hard-coded to 'podman', which broke the
        # docker fallback path; use the selected runtime instead.
        if [ ! "$($service ps -aq -f name=jaeger)" ]; then
            $service "$@"
        fi
    else
        echo "cannot find docker or podman, please restart service and rerun."
    fi
}
1598
echo ""
# Optionally deploy a jaeger all-in-one tracing container.
if [ $with_jaeger -eq 1 ]; then
    debug echo "Enabling jaegertracing..."
    docker_service run -d --name jaeger \
        -p 5775:5775/udp \
        -p 6831:6831/udp \
        -p 6832:6832/udp \
        -p 5778:5778 \
        -p 16686:16686 \
        -p 14268:14268 \
        -p 14250:14250 \
        jaegertracing/all-in-one:1.20
fi


debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."

echo ""
# Print dashboard/restful access details collected while starting the mgrs.
if [ "$new" -eq 1 ]; then
    if $with_mgr_dashboard; then
        echo "dashboard urls: $DASH_URLS"
        echo "  w/ user/pass: admin / admin"
    fi
    echo "restful urls: $RESTFUL_URLS"
    echo "  w/ user/pass: admin / $RESTFUL_SECRET"
    echo ""
fi
echo ""
# add header to the environment file
{
    echo "#"
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
    echo "#"
} > $CEPH_DIR/vstart_environment.sh
{
    # NOTE(review): no ':' between $CEPH_PYTHON_COMMON and \$PYTHONPATH —
    # presumably $CEPH_PYTHON_COMMON carries a trailing ':'; confirm.
    echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
    echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
    echo "export PATH=$CEPH_DIR/bin:\$PATH"

    # Only export conf/keyring paths when not running from the build dir,
    # where the defaults already resolve correctly.
    if [ "$CEPH_DIR" != "$PWD" ]; then
        echo "export CEPH_CONF=$conf_fn"
        echo "export CEPH_KEYRING=$keyring_fn"
    fi

    if [ -n "$CEPHFS_SHELL" ]; then
        echo "alias cephfs-shell=$CEPHFS_SHELL"
    fi
} | tee -a $CEPH_DIR/vstart_environment.sh

echo "CEPH_DEV=1"

# always keep this section at the very bottom of this file
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
    echo ""
    echo ""
    echo "WARNING:"
    echo "    Please remove stray $STRAY_CONF_PATH if not needed."
    echo "    Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "    and may lead to undesired results."
    echo ""
    echo "NOTE:"
    echo "    Remember to restart cluster after removing $STRAY_CONF_PATH"
fi

init_logrotate