]> git.proxmox.com Git - ceph.git/blob - ceph/src/vstart.sh
02d84cc7fcc3c110fd6048d1ebf7272ed47ea095
[ceph.git] / ceph / src / vstart.sh
1 #!/usr/bin/env bash
2 # -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
3 # vim: softtabstop=4 shiftwidth=4 expandtab
4
5 # abort on failure
6 set -e
7
8 quoted_print() {
9 for s in "$@"; do
10 if [[ "$s" =~ \ ]]; then
11 printf -- "'%s' " "$s"
12 else
13 printf -- "$s "
14 fi
15 done
16 printf '\n'
17 }
18
debug() {
    # Run the given command with its stdout diverted to stderr, keeping the
    # script's own stdout clean for machine-readable output.
    "$@" 1>&2
}
22
prunb() {
    # Echo the command (with a trailing '&') to stderr for visibility, then
    # run it in the background with the build's bin dir prepended to PATH
    # for that command only.
    debug quoted_print "$@" '&'
    PATH=$CEPH_BIN:$PATH "$@" &
}
27
prun() {
    # Echo the command to stderr for visibility, then run it in the
    # foreground with the build's bin dir prepended to PATH for that
    # command only.
    debug quoted_print "$@"
    PATH=$CEPH_BIN:$PATH "$@"
}
32
33
if [ -n "$VSTART_DEST" ]; then
    # Run from an arbitrary destination directory: keep binaries/libs from
    # the current build tree but place all cluster state under $VSTART_DEST.
    # $() and quoting replace the old unquoted backticks so a source path
    # containing spaces cannot word-split.
    SRC_PATH=$(dirname "$0")
    SRC_PATH=$(cd "$SRC_PATH" && pwd)

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${CEPH_BIN:-${PWD}/bin}
    CEPH_LIB=${CEPH_LIB:-${PWD}/lib}

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/asok
    CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}
fi
48
get_cmake_variable() {
    # Print the cached value of a cmake variable (e.g. WITH_RBD) from
    # CMakeCache.txt in the current directory.
    local variable=$1
    # anchor the match so "FOO" does not also match "OTHER_FOO:", and keep
    # everything after the first '=' so values containing '=' survive
    grep "^${variable}:" CMakeCache.txt | cut -d "=" -f 2-
}
53
# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build: learn the source location from the cmake cache.
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    CEPH_BUILD_DIR=$(pwd)
    MGR_PYTHON_PATH=${MGR_PYTHON_PATH:-$CEPH_ROOT/src/pybind/mgr}
fi
61
# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    CEPH_BIN=${CEPH_BIN:-$CEPH_BUILD_ROOT/bin}
    CEPH_LIB=${CEPH_LIB:-$CEPH_BUILD_ROOT/lib}
    CEPH_EXT_LIB=${CEPH_EXT_LIB:-$CEPH_BUILD_ROOT/external/lib}
    EC_PATH=${EC_PATH:-$CEPH_LIB/erasure-code}
    OBJCLASS_PATH=${OBJCLASS_PATH:-$CEPH_LIB/rados-classes}
    # make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
    # running out of a build tree: tools come from the source tree, built
    # artifacts from the build directory
    CEPHFS_SHELL=${CEPHFS_SHELL:-$CEPH_ROOT/src/tools/cephfs/shell/cephfs-shell}
    PYBIND=${PYBIND:-$CEPH_ROOT/src/pybind}
    CEPH_BIN=${CEPH_BIN:-$CEPH_BUILD_DIR/bin}
    CEPH_ADM=${CEPH_ADM:-$CEPH_BIN/ceph}
    INIT_CEPH=${INIT_CEPH:-$CEPH_BIN/init-ceph}
    CEPH_LIB=${CEPH_LIB:-$CEPH_BUILD_DIR/lib}
    CEPH_EXT_LIB=${CEPH_EXT_LIB:-$CEPH_BUILD_DIR/external/lib}
    OBJCLASS_PATH=${OBJCLASS_PATH:-$CEPH_LIB}
    EC_PATH=${EC_PATH:-$CEPH_LIB}
    CEPH_PYTHON_COMMON=${CEPH_PYTHON_COMMON:-$CEPH_ROOT/src/python-common}
fi
82
if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi

PYBIND=${PYBIND:-./pybind}

# when set, CEPH_PYTHON_COMMON gets a trailing ':' so it can be spliced
# directly into PYTHONPATH below
[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH

export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
# Suppress logging for regular use that indicated that we are using a
# development version. vstart.sh is only used during testing and
# development
export CEPH_DEV=1
99
# Accept the short-form environment variables (e.g. MON=3 ./vstart.sh) as
# fallbacks for the CEPH_NUM_* variables.
CEPH_NUM_MON=${CEPH_NUM_MON:-$MON}
CEPH_NUM_OSD=${CEPH_NUM_OSD:-$OSD}
CEPH_NUM_MDS=${CEPH_NUM_MDS:-$MDS}
CEPH_NUM_MGR=${CEPH_NUM_MGR:-$MGR}
CEPH_NUM_FS=${CEPH_NUM_FS:-$FS}
CEPH_NUM_RGW=${CEPH_NUM_RGW:-$RGW}
GANESHA_DAEMON_NUM=${GANESHA_DAEMON_NUM:-$NFS}

# if none of the CEPH_NUM_* number is specified, kill the existing
# cluster.
if [ -z "$CEPH_NUM_MON" ] && [ -z "$CEPH_NUM_OSD" ] && \
   [ -z "$CEPH_NUM_MDS" ] && [ -z "$CEPH_NUM_MGR" ] && \
   [ -z "$GANESHA_DAEMON_NUM" ]; then
    kill_all=1
else
    kill_all=0
fi

# hard defaults for anything still unset
CEPH_NUM_MON=${CEPH_NUM_MON:-3}
CEPH_NUM_OSD=${CEPH_NUM_OSD:-3}
CEPH_NUM_MDS=${CEPH_NUM_MDS:-3}
CEPH_NUM_MGR=${CEPH_NUM_MGR:-1}
CEPH_NUM_FS=${CEPH_NUM_FS:-1}
CEPH_MAX_MDS=${CEPH_MAX_MDS:-1}
CEPH_NUM_RGW=${CEPH_NUM_RGW:-0}
GANESHA_DAEMON_NUM=${GANESHA_DAEMON_NUM:-0}

CEPH_DIR=${CEPH_DIR:-$PWD}
CEPH_DEV_DIR=${CEPH_DEV_DIR:-$CEPH_DIR/dev}
CEPH_OUT_DIR=${CEPH_OUT_DIR:-$CEPH_DIR/out}
CEPH_ASOK_DIR=${CEPH_ASOK_DIR:-$CEPH_DIR/asok}
CEPH_RGW_PORT=${CEPH_RGW_PORT:-8000}
CEPH_CONF_PATH=${CEPH_CONF_PATH:-$CEPH_DIR}
CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}

# pool replication can never exceed the osd count
if [ "$CEPH_NUM_OSD" -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
142
# Defaults for the command-line options parsed below.
extra_conf=""
new=0           # -n: blow away any existing cluster and create a new one
standby=0       # -s: add a standby-replay mds per active mds
debug=0
trace=0
ip=""
nodaemon=0      # run daemons through ceph-run instead of daemonizing
redirect=0
smallmds=0
short=0
crimson=0
ec=0
cephadm=0
parallel=true   # start all osds in parallel
restart=1
hitset=""
overwrite_conf=0
cephx=1 #turn cephx on by default
gssapi_authx=0
cache=""
# bluestore is not supported on FreeBSD; fall back to memstore there
if [ `uname` = FreeBSD ]; then
    objectstore="memstore"
else
    objectstore="bluestore"
fi
ceph_osd=ceph-osd
rgw_frontend="beast"
rgw_compression=""
lockdep=${LOCKDEP:-1}
spdk_enabled=0 # disable SPDK by default
pmem_enabled=0
zoned_enabled=0
io_uring_enabled=0
with_jaeger=0

# the dashboard needs both its frontend and RBD support compiled in
with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
   [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi
with_mgr_restful=false

kstore_path=
declare -a block_devs             # --bluestore-devs / --seastore-devs
declare -a secondary_block_devs   # --seastore-secondary-devs
secondary_block_devs_type="SSD"

# ceph.conf section that records this cluster's daemon counts
VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
monmap_fn="/tmp/ceph_monmap.$$"   # temporary; removed after mon mkfs
inc_osd_num=0

# messenger protocol(s) to bind: 1, 2, or 21 (both)
msgr="21"
203
204 read -r -d '' usage <<EOF || true
205 usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d
206 options:
207 -d, --debug
208 -t, --trace
209 -s, --standby_mds: Generate standby-replay MDS for each active
210 -l, --localhost: use localhost instead of hostname
211 -i <ip>: bind to specific ip
212 -n, --new
213 --valgrind[_{osd,mds,mon,rgw}] 'toolname args...'
214 --nodaemon: use ceph-run as wrapper for mon/osd/mds
215 --redirect-output: only useful with nodaemon, directs output to log file
216 --smallmds: limit mds cache memory limit
217 -m ip:port specify monitor address
218 -k keep old configuration files (default)
219 -x enable cephx (on by default)
220 -X disable cephx
221 -g --gssapi enable Kerberos/GSSApi authentication
222 -G disable Kerberos/GSSApi authentication
223 --hitset <pool> <hit_set_type>: enable hitset tracking
224 -e : create an erasure pool
225 -o config add extra config parameters to all sections
226 --rgw_port specify ceph rgw http listen port
227 --rgw_frontend specify the rgw frontend configuration
228 --rgw_arrow_flight start arrow flight frontend
229 --rgw_compression specify the rgw compression plugin
230 --seastore use seastore as crimson osd backend
231 -b, --bluestore use bluestore as the osd objectstore backend (default)
232 -K, --kstore use kstore as the osd objectstore backend
233 --cyanstore use cyanstore as the osd objectstore backend
234 --memstore use memstore as the osd objectstore backend
235 --cache <pool>: enable cache tiering on pool
236 --short: short object names only; necessary for ext4 dev
237 --nolockdep disable lockdep
238 --multimds <count> allow multimds with maximum active count
239 --without-dashboard: do not run using mgr dashboard
240 --bluestore-spdk: enable SPDK and with a comma-delimited list of PCI-IDs of NVME device (e.g, 0000:81:00.0)
241 --bluestore-pmem: enable PMEM and with path to a file mapped to PMEM
242 --msgr1: use msgr1 only
243 --msgr2: use msgr2 only
244 --msgr21: use msgr2 and msgr1
245 --crimson: use crimson-osd instead of ceph-osd
246 --crimson-foreground: use crimson-osd, but run it in the foreground
247 --osd-args: specify any extra osd specific options
248 --bluestore-devs: comma-separated list of blockdevs to use for bluestore
249 --bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)
250 --bluestore-io-uring: enable io_uring backend
251 --inc-osd: append some more osds into existing vcluster
252 --cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]
253 --no-parallel: dont start all OSDs in parallel
254 --no-restart: dont restart process when using ceph-run
255 --jaeger: use jaegertracing for tracing
256 --seastore-devs: comma-separated list of blockdevs to use for seastore
257 --seastore-secondary-devs: comma-separated list of secondary blockdevs to use for seastore
258 --seastore-secondary-devs-type: device type of all secondary blockdevs. HDD, SSD(default), ZNS or RANDOM_BLOCK_SSD
259 --crimson-smp: number of cores to use for crimson
260 \n
261 EOF
262
usage_exit() {
    # Render the help text and exit successfully. $usage is deliberately
    # used as the printf format string so its embedded \n escapes expand.
    printf "$usage"
    exit
}
267
parse_block_devs() {
    # Parse a comma-separated device list into the global block_devs array,
    # verifying that every entry is a writable block device.
    #   $1 - option name (used in the error message)
    #   $2 - comma-separated list of device paths
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a block_devs <<< "$devs"
    for dev in "${block_devs[@]}"; do
        # quote the path so names containing spaces or glob characters
        # cannot break or subvert the test; report errors on stderr
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            echo "All $opt_name must refer to writable block devices" >&2
            exit 1
        fi
    done
}
282
parse_secondary_devs() {
    # Parse a comma-separated device list into the global
    # secondary_block_devs array, verifying that every entry is a writable
    # block device.
    #   $1 - option name (used in the error message)
    #   $2 - comma-separated list of device paths
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a secondary_block_devs <<< "$devs"
    for dev in "${secondary_block_devs[@]}"; do
        # quote the path so names containing spaces or glob characters
        # cannot break or subvert the test; report errors on stderr
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            echo "All $opt_name must refer to writable block devices" >&2
            exit 1
        fi
    done
}
297
crimson_smp=1
# Parse command-line options. The loop shifts once per iteration; options
# that take a value consume it with an extra 'shift' inside their case arm.
while [ $# -ge 1 ]; do
    case $1 in
        -d | --debug)
            debug=1
            ;;
        -t | --trace)
            trace=1
            ;;
        -s | --standby_mds)
            standby=1
            ;;
        -l | --localhost)
            ip="127.0.0.1"
            ;;
        -i)
            [ -z "$2" ] && usage_exit
            ip="$2"
            shift
            ;;
        -e)
            ec=1
            ;;
        --new | -n)
            new=1
            ;;
        --inc-osd)
            # optional numeric argument: how many osds to append (default 1)
            new=0
            kill_all=0
            inc_osd_num=$2
            if [ "$inc_osd_num" == "" ]; then
                inc_osd_num=1
            else
                shift
            fi
            ;;
        --short)
            short=1
            ;;
        --crimson)
            crimson=1
            ceph_osd=crimson-osd
            nodaemon=1
            msgr=2
            ;;
        --crimson-foreground)
            crimson=1
            ceph_osd=crimson-osd
            nodaemon=0
            msgr=2
            ;;
        --osd-args)
            extra_osd_args="$2"
            shift
            ;;
        --msgr1)
            msgr="1"
            ;;
        --msgr2)
            msgr="2"
            ;;
        --msgr21)
            msgr="21"
            ;;
        --cephadm)
            cephadm=1
            ;;
        --no-parallel)
            parallel=false
            ;;
        --no-restart)
            restart=0
            ;;
        --valgrind)
            [ -z "$2" ] && usage_exit
            valgrind=$2
            shift
            ;;
        --valgrind_args)
            valgrind_args="$2"
            shift
            ;;
        --valgrind_mds)
            [ -z "$2" ] && usage_exit
            valgrind_mds=$2
            shift
            ;;
        --valgrind_osd)
            [ -z "$2" ] && usage_exit
            valgrind_osd=$2
            shift
            ;;
        --valgrind_mon)
            [ -z "$2" ] && usage_exit
            valgrind_mon=$2
            shift
            ;;
        --valgrind_mgr)
            [ -z "$2" ] && usage_exit
            valgrind_mgr=$2
            shift
            ;;
        --valgrind_rgw)
            [ -z "$2" ] && usage_exit
            valgrind_rgw=$2
            shift
            ;;
        --nodaemon)
            nodaemon=1
            ;;
        --redirect-output)
            redirect=1
            ;;
        --smallmds)
            smallmds=1
            ;;
        --rgw_port)
            CEPH_RGW_PORT=$2
            shift
            ;;
        --rgw_frontend)
            rgw_frontend=$2
            shift
            ;;
        --rgw_arrow_flight)
            rgw_flight_frontend="yes"
            ;;
        --rgw_compression)
            rgw_compression=$2
            shift
            ;;
        --kstore_path)
            kstore_path=$2
            shift
            ;;
        -m)
            [ -z "$2" ] && usage_exit
            MON_ADDR=$2
            shift
            ;;
        -x)
            cephx=1 # this is on be default, flag exists for historical consistency
            ;;
        -X)
            cephx=0
            ;;

        -g | --gssapi)
            gssapi_authx=1
            ;;
        -G)
            gssapi_authx=0
            ;;

        -k)
            if [ ! -r $conf_fn ]; then
                echo "cannot use old configuration: $conf_fn not readable." >&2
                exit
            fi
            new=0
            ;;
        --memstore)
            objectstore="memstore"
            ;;
        --cyanstore)
            objectstore="cyanstore"
            ;;
        --seastore)
            objectstore="seastore"
            ;;
        -b | --bluestore)
            objectstore="bluestore"
            ;;
        -K | --kstore)
            objectstore="kstore"
            ;;
        --hitset)
            hitset="$hitset $2 $3"
            shift
            shift
            ;;
        -o)
            extra_conf+=$'\n'"$2"
            shift
            ;;
        --cache)
            if [ -z "$cache" ]; then
                cache="$2"
            else
                cache="$cache $2"
            fi
            shift
            ;;
        --nolockdep)
            lockdep=0
            ;;
        --multimds)
            CEPH_MAX_MDS="$2"
            shift
            ;;
        --without-dashboard)
            with_mgr_dashboard=false
            ;;
        --with-restful)
            with_mgr_restful=true
            ;;
        --seastore-devs)
            parse_block_devs --seastore-devs "$2"
            shift
            ;;
        --seastore-secondary-devs)
            parse_secondary_devs --seastore-devs "$2"
            shift
            ;;
        --seastore-secondary-devs-type)
            secondary_block_devs_type="$2"
            shift
            ;;
        --crimson-smp)
            crimson_smp=$2
            shift
            ;;
        --bluestore-spdk)
            [ -z "$2" ] && usage_exit
            IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
            spdk_enabled=1
            shift
            ;;
        --bluestore-pmem)
            [ -z "$2" ] && usage_exit
            bluestore_pmem_file="$2"
            pmem_enabled=1
            shift
            ;;
        --bluestore-devs)
            parse_block_devs --bluestore-devs "$2"
            shift
            ;;
        --bluestore-zoned)
            zoned_enabled=1
            ;;
        --bluestore-io-uring)
            io_uring_enabled=1
            # this flag takes no argument: the stray 'shift' that used to
            # live here made --bluestore-io-uring swallow the next option
            ;;
        --jaeger)
            with_jaeger=1
            echo "with_jaeger $with_jaeger"
            ;;
        *)
            usage_exit
    esac
    shift
done
552
# With no daemon counts requested at all, just stop the existing cluster.
# Default kill_all to 0 so the test does not error out if it is unset.
if [ "${kill_all:-0}" -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi
556
if [ "$new" -eq 0 ]; then
    # Reuse an existing cluster: recover the asok directory and daemon
    # counts from the existing ceph.conf. $() and quoting replace the old
    # unquoted backticks so paths with spaces survive.
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=$(dirname "$($CEPH_BIN/ceph-conf -c "$conf_fn" --show-config-value admin_socket)")
    fi
    mkdir -p "$CEPH_ASOK_DIR"
    MON=$($CEPH_BIN/ceph-conf -c "$conf_fn" --name $VSTART_SEC --lookup num_mon 2>/dev/null) && \
        CEPH_NUM_MON="$MON"
    OSD=$($CEPH_BIN/ceph-conf -c "$conf_fn" --name $VSTART_SEC --lookup num_osd 2>/dev/null) && \
        CEPH_NUM_OSD="$OSD"
    MDS=$($CEPH_BIN/ceph-conf -c "$conf_fn" --name $VSTART_SEC --lookup num_mds 2>/dev/null) && \
        CEPH_NUM_MDS="$MDS"
    MGR=$($CEPH_BIN/ceph-conf -c "$conf_fn" --name $VSTART_SEC --lookup num_mgr 2>/dev/null) && \
        CEPH_NUM_MGR="$MGR"
    RGW=$($CEPH_BIN/ceph-conf -c "$conf_fn" --name $VSTART_SEC --lookup num_rgw 2>/dev/null) && \
        CEPH_NUM_RGW="$RGW"
    NFS=$($CEPH_BIN/ceph-conf -c "$conf_fn" --name $VSTART_SEC --lookup num_ganesha 2>/dev/null) && \
        GANESHA_DAEMON_NUM="$NFS"
else
    # only delete if -n
    if [ -e "$conf_fn" ]; then
        asok_dir=$(dirname "$($CEPH_BIN/ceph-conf -c "$conf_fn" --show-config-value admin_socket)")
        rm -- "$conf_fn"
        if [ "$asok_dir" != /var/run/ceph ]; then
            [ -d "$asok_dir" ] && rm -f "$asok_dir"/* && rmdir "$asok_dir"
        fi
    fi
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=$(mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX")
    fi
fi

ARGS="-c $conf_fn"
589
run() {
    # Launch one daemon, honoring the valgrind / nodaemon / redirect modes.
    #   $1   - daemon type (mon/osd/mds/mgr/rgw); selects valgrind_$type
    #   $2   - daemon id (used only for the redirected log file name)
    #   $3.. - the actual command to execute
    type=$1
    shift
    num=$1
    shift
    # per-type valgrind tool falls back to the global --valgrind setting
    eval "valg=\$valgrind_$type"
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    else
        if [ "$nodaemon" -eq 0 ]; then
            prun "$@"
        else
            # foreground via ceph-run; prepend --no-restart when requested
            if [ "$restart" -eq 0 ]; then
                set -- '--no-restart' "$@"
            fi
            if [ "$redirect" -eq 0 ]; then
                prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
            else
                ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
            fi
        fi
    fi
}
616
wconf() {
    # Append stdin to the cluster's ceph.conf — but only when we are either
    # creating a fresh cluster (-n) or explicitly overwriting the config.
    if [ "$new" -eq 1 ] || [ "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}
622
623
do_rgw_conf() {
    # Append one [client.rgw.<port>] section per requested rgw daemon to
    # ceph.conf (via wconf). Writes nothing when no rgw was requested.

    if [ $CEPH_NUM_RGW -eq 0 ]; then
        return 0
    fi

    # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
    # individual rgw's ids will be their ports.
    current_port=$CEPH_RGW_PORT
    # allow only first rgw to start arrow_flight server/port
    local flight_conf=$rgw_flight_frontend
    for n in $(seq 1 $CEPH_NUM_RGW); do
        wconf << EOF
[client.rgw.${current_port}]
        rgw frontends = $rgw_frontend port=${current_port}${flight_conf:+,arrow_flight}
        admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
        debug rgw_flight = 20
EOF
        current_port=$((current_port + 1))
        unset flight_conf
    done

}
647
format_conf() {
    # Re-emit a (possibly multi-line) set of config options so that every
    # line after the first is indented to line up inside a conf section.
    local opts=$1
    local indent="        "
    local line
    local out=""
    while read -r line; do
        if [ -n "$out" ]; then
            out+=$'\n'${indent}${line}
        else
            out="${line}"
        fi
    done <<< "$opts"
    echo "$out"
}
662
prepare_conf() {
    # Write the initial ceph.conf through wconf (which is a no-op unless -n
    # or overwrite_conf is set): global options, auth mode, objectstore
    # tuning and per-daemon-class sections, plus any -o extra options.

    # boilerplate appended to every daemon section ($name/$id expand at
    # daemon runtime, hence the escaped \$)
    local DAEMONOPTS="
        log file = $CEPH_OUT_DIR/\$name.log
        admin socket = $CEPH_ASOK_DIR/\$name.asok
        chdir = \"\"
        pid file = $CEPH_OUT_DIR/\$name.pid
        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    # mgr modules enabled at bootstrap
    local mgr_modules="iostat nfs"
    if $with_mgr_dashboard; then
        mgr_modules+=" dashboard"
    fi
    if $with_mgr_restful; then
        mgr_modules+=" restful"
    fi

    # messenger bindings per --msgr1/--msgr2/--msgr21
    local msgr_conf=''
    if [ $msgr -eq 21 ]; then
        msgr_conf="ms bind msgr2 = true
            ms bind msgr1 = true"
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="ms bind msgr2 = true
            ms bind msgr1 = false"
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="ms bind msgr2 = false
            ms bind msgr1 = true"
    fi

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
        num mon = $CEPH_NUM_MON
        num osd = $CEPH_NUM_OSD
        num mds = $CEPH_NUM_MDS
        num mgr = $CEPH_NUM_MGR
        num rgw = $CEPH_NUM_RGW
        num ganesha = $GANESHA_DAEMON_NUM

[global]
        fsid = $(uuidgen)
        osd failsafe full ratio = .99
        mon osd full ratio = .99
        mon osd nearfull ratio = .99
        mon osd backfillfull ratio = .99
        mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
        erasure code dir = $EC_PATH
        plugin dir = $CEPH_LIB
        run dir = $CEPH_OUT_DIR
        crash dir = $CEPH_OUT_DIR
        enable experimental unrecoverable data corrupting features = *
        osd_crush_chooseleaf_type = 0
        debug asok assert abort = true
        $(format_conf "${msgr_conf}")
        $(format_conf "${extra_conf}")
        $AUTOSCALER_OPTS
EOF
    if [ "$with_jaeger" -eq 1 ] ; then
        wconf <<EOF
        jaeger_agent_port = 6831
EOF
    fi
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
        lockdep = true
EOF
    fi
    # auth mode: cephx (default), gss (--gssapi), or none (-X)
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = cephx
        auth service required = cephx
        auth client required = cephx
EOF
    elif [ "$gssapi_authx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = gss
        auth service required = gss
        auth client required = gss
        gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
    else
        wconf <<EOF
        auth cluster required = none
        auth service required = none
        auth client required = none
        ms mon client mode = crc
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT="        osd max object name len = 460
        osd max object namespace len = 64"
    fi
    # bluestore tuning: db/wal files by default, raw device paths when
    # spdk/pmem is requested, plus zoned / io_uring extras
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ] || [ "$pmem_enabled" -eq 1 ]; then
            BLUESTORE_OPTS="        bluestore_block_db_path = \"\"
        bluestore_block_db_size = 0
        bluestore_block_db_create = false
        bluestore_block_wal_path = \"\"
        bluestore_block_wal_size = 0
        bluestore_block_wal_create = false
        bluestore_spdk_mem = 2048"
        else
            BLUESTORE_OPTS="        bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
        bluestore block db size = 1073741824
        bluestore block db create = true
        bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
        bluestore block wal size = 1048576000
        bluestore block wal create = true"
        fi
        if [ "$zoned_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bluestore min alloc size = 65536
        bluestore prefer deferred size = 0
        bluestore prefer deferred size hdd = 0
        bluestore prefer deferred size ssd = 0
        bluestore allocator = zoned"
        fi
        if [ "$io_uring_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bdev ioring = true"
        fi
    fi
    wconf <<EOF
[client]
$CCLIENTDEBUG
        keyring = $keyring_fn
        log file = $CEPH_OUT_CLIENT_DIR/\$name.\$pid.log
        admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok

        ; needed for s3tests
        rgw crypt s3 kms backend = testing
        rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        rgw crypt require ssl = false
        ; uncomment the following to set LC days as the value in seconds;
        ; needed for passing lc time based s3-tests (can be verbose)
        ; rgw lc debug interval = 10
        $(format_conf "${extra_conf}")
EOF
    do_rgw_conf
    wconf << EOF
[mds]
$CMDSDEBUG
$DAEMONOPTS
        mds data = $CEPH_DEV_DIR/mds.\$id
        mds root ino uid = `id -u`
        mds root ino gid = `id -g`
        $(format_conf "${extra_conf}")
[mgr]
        mgr disabled modules = rook
        mgr data = $CEPH_DEV_DIR/mgr.\$id
        mgr module path = $MGR_PYTHON_PATH
        cephadm path = $CEPH_BIN/cephadm
$DAEMONOPTS
        $(format_conf "${extra_conf}")
[osd]
$DAEMONOPTS
        osd_check_max_object_name_len_on_startup = false
        osd data = $CEPH_DEV_DIR/osd\$id
        osd journal = $CEPH_DEV_DIR/osd\$id/journal
        osd journal size = 100
        osd class tmp = out
        osd class dir = $OBJCLASS_PATH
        osd class load list = *
        osd class default list = *
        osd fast shutdown = false

        bluestore fsck on mount = true
        bluestore block create = true
$BLUESTORE_OPTS

        ; kstore
        kstore fsck on mount = true
        osd objectstore = $objectstore
$COSDSHORT
        $(format_conf "${extra_conf}")
[mon]
        mon_data_avail_crit = 1
        mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
        $(format_conf "${extra_conf}")
        mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
        auth allow insecure global id reclaim = false
EOF

    if [ "$crimson" -eq 1 ]; then
        wconf <<EOF
        osd pool default crimson = true
EOF
    fi
}
857
write_logrotate_conf() {
    # Emit a logrotate(8) configuration covering every vstart daemon log.
    local log_glob
    log_glob="$(pwd)/out/*.log"

    cat << EOF
$log_glob
{
    rotate 5
    size 1G
    copytruncate
    compress
    notifempty
    missingok
    sharedscripts
    postrotate
        # NOTE: assuring that the absence of one of the following processes
        # won't abort the logrotate command.
        killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
    endscript
}
EOF
}
879
init_logrotate() {
    # Create logrotate.conf for the cluster logs on first run; a stale
    # state file left over from a previous cluster is removed so rotation
    # history starts fresh.
    logrotate_conf_path=$(pwd)"/logrotate.conf"
    logrotate_state_path=$(pwd)"/logrotate.state"

    # 'test -e' replaces the obsolete unary 'test -a'; quote the paths so
    # a build directory containing spaces cannot break the checks
    if ! test -e "$logrotate_conf_path"; then
        if test -e "$logrotate_state_path"; then
            rm -f "$logrotate_state_path"
        fi
        write_logrotate_conf > "$logrotate_conf_path"
    fi
}
891
start_mon() {
    # Pick the first $CEPH_NUM_MON letters as monitor ids (a b c ...).
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ]; then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        # warn when the chosen IP is a loopback address
        if [ `echo $IP | grep '^127\\.'` ]; then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo "  connect. either adjust /etc/hosts, or edit this script to use your"
            echo "  machine's real IP."
            echo
        fi

        # create the mon. and client.admin keys for the new cluster
        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local params=()
        local count=0
        local mon_host=""
        for f in $MONS
        do
            # advertised address(es) per the msgr selection; ports are
            # allocated sequentially upward from CEPH_PORT
            if [ $msgr -eq 1 ]; then
                A="v1:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 2 ]; then
                A="v2:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 21 ]; then
                A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
            fi
            params+=("--addv" "$f" "$A")
            mon_host="$mon_host $A"
            wconf <<EOF
[mon.$f]
        host = $HOSTNAME
        mon data = $CEPH_DEV_DIR/mon.$f
EOF
            count=$(($count + 2))
        done
        wconf <<EOF
[global]
        mon host = $mon_host
EOF
        prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"

        # wipe and mkfs each monitor's data directory
        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done

    if [ "$crimson" -eq 1 ]; then
        $CEPH_BIN/ceph osd set-allow-crimson --yes-i-really-mean-it
    fi
}
973
start_osd() {
    # Determine the range of osd ids to create/start. With --inc-osd we
    # append new osds after the cluster's current max osd id.
    if [ $inc_osd_num -gt 0 ]; then
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        start=$old_maxosd
        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf
    else
        start=0
        end=$(($CEPH_NUM_OSD-1))
    fi
    local osds_wait
    for osd in `seq $start $end`
    do
        local extra_seastar_args
        if [ "$ceph_osd" == "crimson-osd" ]; then
            # pin each crimson osd to its own block of crimson_smp cores
            bottom_cpu=$(( osd * crimson_smp ))
            top_cpu=$(( bottom_cpu + crimson_smp - 1 ))
            # set a single CPU nodes for each osd
            extra_seastar_args="--cpuset $bottom_cpu-$top_cpu"
            if [ "$debug" -ne 0 ]; then
                extra_seastar_args+=" --debug"
            fi
            if [ "$trace" -ne 0 ]; then
                extra_seastar_args+=" --trace"
            fi
        fi
        if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
            wconf <<EOF
[osd.$osd]
        host = $HOSTNAME
EOF
            # raw block device path for spdk/pmem backends
            if [ "$spdk_enabled" -eq 1 ]; then
                wconf <<EOF
        bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
EOF
            elif [ "$pmem_enabled" -eq 1 ]; then
                wconf <<EOF
        bluestore_block_path = ${bluestore_pmem_file}
EOF
            fi
            rm -rf $CEPH_DEV_DIR/osd$osd || true
            # clean up any btrfs subvolumes a previous cluster left behind
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$kstore_path" ]; then
                ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
                # link (and zero the first MB of) any dedicated block devs
                if [ -n "${block_devs[$osd]}" ]; then
                    dd if=/dev/zero of=${block_devs[$osd]} bs=1M count=1
                    ln -s ${block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block
                fi
                if [ -n "${secondary_block_devs[$osd]}" ]; then
                    dd if=/dev/zero of=${secondary_block_devs[$osd]} bs=1M count=1
                    mkdir -p $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1
                    ln -s ${secondary_block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1/block
                fi
            fi
            if [ "$objectstore" == "bluestore" ]; then
                wconf <<EOF
        bluestore fsck on mount = false
EOF
            fi

            # register the osd with the cluster, then mkfs its data store
            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            prun $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args \
                2>&1 | tee $CEPH_OUT_DIR/osd-mkfs.$osd.log

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            cat > $key_fn<<EOF
[osd.$osd]
        key = $OSD_SECRET
EOF
        fi
        echo start osd.$osd
        local osd_pid
        echo 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &
        osd_pid=$!
        # with --no-parallel each osd must come up before the next starts
        if $parallel; then
            osds_wait=$osd_pid
        else
            wait $osd_pid
        fi
    done
    if $parallel; then
        for p in $osds_wait; do
            wait $p
        done
        debug echo OSDs started
    fi
    if [ $inc_osd_num -gt 0 ]; then
        # update num osd
        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
    fi
}
1080
create_mgr_restful_secret() {
    # Wait for the restful module to register its commands, then create a
    # self-signed cert and an admin API key, leaving the key in the global
    # RESTFUL_SECRET for the cluster summary printed later.
    until ceph_adm -h | grep -c -q ^restful ; do
        debug echo 'waiting for mgr restful module to start'
        sleep 1
    done
    local keyfile
    if ceph_adm restful create-self-signed-cert > /dev/null; then
        keyfile=$(mktemp)
        ceph_adm restful create-key admin -o "$keyfile"
        RESTFUL_SECRET=$(cat "$keyfile")
        rm "$keyfile"
    else
        debug echo MGR Restful is not working, perhaps the package is not installed?
    fi
}
1096
start_mgr() {
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            # create a keyring and auth entry for this mgr
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

            wconf <<EOF
[mgr.$name]
        host = $HOSTNAME
EOF

            # per-mgr dashboard port (https unless DASHBOARD_SSL=0)
            if $with_mgr_dashboard ; then
                local port_option="ssl_server_port"
                local http_proto="https"
                if [ "$ssl" == "0" ]; then
                    port_option="server_port"
                    http_proto="http"
                    ceph_adm config set mgr mgr/dashboard/ssl false --force
                fi
                ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
                if [ $mgr -eq 1 ]; then
                    DASH_URLS="$http_proto://$IP:$MGR_PORT"
                else
                    DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
                fi
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
            ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
            PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

            # per-mgr restful API port
            ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
        fi

        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    # block until at least one mgr reports itself available
    while ! ceph_adm mgr stat | jq -e '.available'; do
        debug echo 'waiting for mgr to become available'
        sleep 1
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            while ! ceph_adm -h | grep -c -q ^dashboard ; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
            printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
            ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
                administrator --force-password
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert; then
                    debug echo dashboard module not working correctly!
                fi
            fi
        fi
        if $with_mgr_restful; then
            create_mgr_restful_secret
        fi
    fi

    if [ "$cephadm" -eq 1 ]; then
        debug echo Enabling cephadm orchestrator
        if [ "$new" -eq 1 ]; then
            # pin the container image to the current master devel digest
            digest=$(curl -s \
                https://hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
                | jq -r '.images[0].digest')
            ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
        fi
        ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
        ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
        ceph_adm mgr module enable cephadm
        ceph_adm orch set backend cephadm
        ceph_adm orch host add "$(hostname)"
        ceph_adm orch apply crash '*'
        ceph_adm config set mgr mgr/cephadm/allow_ptrace true
    fi
}
1194
start_mds() {
    # Start $CEPH_NUM_MDS metadata servers, named a, b, c, ... p.
    # On a new cluster ($new) this also creates each daemon's keyring and
    # auth entry and, after the daemons are registered, the $CEPH_NUM_FS
    # cephfs volumes.
    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
        host = $HOSTNAME
EOF
            if [ "$standby" -eq 1 ]; then
                # One standby-replay daemon ("<name>s") per active MDS.
                # Note: the first heredoc line has no section header, so it
                # is appended to the [mds.$name] section written just above.
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
        mds standby for rank = $mds
[mds.${name}s]
        mds standby replay = true
        mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                # key + auth for the standby-replay daemon as well
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done

    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

            # wait for volume module to load
            while ! ceph_adm fs volume ls ; do sleep 1 ; done
            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                # create volume "<name>" plus a per-fs client key
                ceph_adm fs volume create ${name}
                ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi

}
1259
# Ganesha daemons require the nfs-ganesha, nfs-ganesha-ceph,
# nfs-ganesha-rados-grace and nfs-ganesha-rados-urls (version 3.3 and
# above) packages to be installed. On Fedora>=31 these packages can be
# installed directly with 'dnf'. For CentOS>=8 the packages are
# available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha
1267
start_ganesha() {
    # Start $GANESHA_DAEMON_NUM NFS-Ganesha daemons (named a, b, ...),
    # each on its own port starting at $CEPH_PORT+4000, exporting cephfs
    # "a".  Config and the grace db live in the ".nfs" RADOS pool.
    cluster_id="vstart"
    GANESHA_PORT=$(($CEPH_PORT + 4000))
    local ganesha=0
    test_user="$cluster_id"
    pool_name=".nfs"
    namespace=$cluster_id
    url="rados://$pool_name/$namespace/conf-nfs.$test_user"

    # shared cephx identity used by all ganesha daemons
    prun ceph_adm auth get-or-create client.$test_user \
        mon "allow r" \
        osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
        mds "allow rw path=/" \
        >> "$keyring_fn"

    # the nfs mgr module needs an orchestrator backend; use the test stub
    ceph_adm mgr module enable test_orchestrator
    ceph_adm orch set backend test_orchestrator
    ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
    prun ceph_adm nfs cluster create $cluster_id
    prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"

    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break

        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir

        # per-daemon ganesha config: NFSv4.1/4.2 only, rados_cluster
        # recovery backend, exports fetched from the rados:// URL above
        echo "NFS_CORE_PARAM {
            Enable_NLM = false;
            Enable_RQUOTA = false;
            Protocols = 4;
            NFS_Port = $port;
        }

        MDCACHE {
           Dir_Chunk = 0;
        }

        NFSv4 {
           RecoveryBackend = rados_cluster;
           Minor_Versions = 1, 2;
        }

        RADOS_KV {
           pool = '$pool_name';
           namespace = $namespace;
           UserId = $test_user;
           nodeid = $name;
        }

        RADOS_URLS {
           Userid = $test_user;
           watch_url = '$url';
        }

        %url $url" > "$ganesha_dir/ganesha-$name.conf"
        wconf <<EOF
[ganesha.$name]
        host = $HOSTNAME
        ip = $IP
        port = $port
        ganesha data = $ganesha_dir
        pid file = $CEPH_OUT_DIR/ganesha-$name.pid
EOF

        # register this node in the grace db, then dump the db state
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG

        # Wait few seconds for grace period to be removed
        sleep 2

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        echo "$test_user ganesha daemon $name started on port: $port"
    done
}
1350
# Build per-daemon-type debug config snippets: with --debug we crank up
# subsystem logging; otherwise only the mon gets modest verbosity.
if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
        debug mon = 10
        debug ms = 1'
    CCLIENTDEBUG=''
    CMDSDEBUG=''
else
    debug echo "** going verbose **"
    # BUGFIX: "debug osd = 20" was listed twice in this snippet; keep it once.
    CMONDEBUG='
        debug osd = 20
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1'
    CCLIENTDEBUG='
        debug client = 20'
    CMDSDEBUG='
        debug mds = 20'
fi
1372
# Crimson doesn't support PG merge/split yet.
if [ "$ceph_osd" == "crimson-osd" ]; then
    AUTOSCALER_OPTS='
        osd_pool_default_pg_autoscale_mode = off'
fi

# If a monitor address was given explicitly, point every daemon type at it.
if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi
1384
# Pick a random base port in [40000, 41000) unless the user supplied one,
# retrying until the chosen port is not already in a LISTEN state.
if [ -z "$CEPH_PORT" ]; then
    while true
    do
        CEPH_PORT=$(( RANDOM % 1000 + 40000 ))
        # grep -E replaces the deprecated egrep; no output needed, just status
        ss -a -n | grep -E "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
    done
fi
1392
[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"

# on a full (non-incremental) start, remove stale core dumps
if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
fi

# make sure the asok/out/dev directories exist
[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
[ -d $CEPH_OUT_CLIENT_DIR ] || mkdir -p $CEPH_OUT_CLIENT_DIR
# clear old daemon logs unless we are only adding OSDs to a live cluster
if [ $inc_osd_num -eq 0 ]; then
    $SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*

# a brand-new cephx cluster starts from an empty keyring file
[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn
1412
1413
# figure machine's ip: honor an explicit $ip, otherwise take the first
# non-localhost IPv4 address reported by `ip addr` (or ifconfig), falling
# back to 127.0.0.1 when nothing usable is found.
HOSTNAME=$(hostname -s)
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    # `command -v` is the portable replacement for `which`
    if command -v ip >/dev/null 2>&1; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # filter out IPv4 and localhost addresses
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing left, try using localhost address, it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"
1432
1433
# Default the admin CLI binary unless overridden (quote the expansion:
# the unquoted form only worked by accident when the var was unset).
[ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
1435
ceph_adm() {
    # Invoke the ceph CLI against this vstart cluster, attaching the
    # vstart keyring only when cephx authentication is enabled.
    local -a adm_cmd
    adm_cmd=("$CEPH_ADM" -c "$conf_fn")
    if [ "$cephx" -eq 1 ]; then
        adm_cmd+=(-k "$keyring_fn")
    fi
    prun $SUDO "${adm_cmd[@]}" "$@"
}
1443
# "--inc-osd" mode: just add OSDs to the already-running cluster and stop.
if [ $inc_osd_num -gt 0 ]; then
    start_osd
    exit
fi

# fresh cluster: generate a new ceph.conf
if [ "$new" -eq 1 ]; then
    prepare_conf
fi
1452
if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon

    # Seed the centralized config database (rather than ceph.conf) with
    # dev-cluster-friendly defaults once the mon is up.
    debug echo Populating config ...
    cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[global]
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1

[mon]
mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
mon_allow_pool_size_one = true

[osd]
osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288

[mds]
mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true

[mgr]
mgr/telemetry/nag = false
mgr/telemetry/enable = false

EOF

    # with --debug, raise subsystem log levels cluster-wide too
    if [ "$debug" -ne 0 ]; then
        debug echo Setting debug configs ...
        cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[mgr]
debug_ms = 1
debug_mgr = 20
debug_monc = 20
debug_mon = 20

[osd]
debug_ms = 1
debug_osd = 25
debug_objecter = 20
debug_monc = 20
debug_mgrc = 20
debug_journal = 20
debug_bluestore = 20
debug_bluefs = 20
debug_rocksdb = 20
debug_bdev = 20
debug_reserver = 10
debug_objclass = 20

[mds]
debug_ms = 1
debug_mds = 20
debug_monc = 20
debug_mgrc = 20
mds_debug_scatterstat = true
mds_verify_scatter = true
EOF
    fi
    # cephadm needs the mon public_network; derive it from the route
    # table entry that contains our $IP
    if [ "$cephadm" -gt 0 ]; then
        debug echo Setting mon public_network ...
        public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
        ceph_adm config set mon public_network $public_network
    fi
fi
1524
# crimson reads its seastar SMP setting from the config database
if [ "$ceph_osd" == "crimson-osd" ]; then
    $CEPH_BIN/ceph -c $conf_fn config set osd crimson_seastar_smp $crimson_smp
fi

if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi

# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi

# mds
if [ "$smallmds" -eq 1 ]; then
    # shrink the MDS journal and cache for low-memory dev boxes
    wconf <<EOF
[mds]
        mds log max segments = 2
        # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
        mds cache memory limit = 100M
EOF
fi

if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
    # key with access to all FS
    ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
fi

# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
    fi
done
1568
# mgr

# optionally create a demo erasure-coded pool (k=2 data, m=2 coding chunks)
if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec erasure ec-profile
EOF
fi
1577
do_cache() {
    # Put a writeback cache tier "<pool>-cache" in front of each pool
    # named on the command line.
    for p in "$@"; do
        debug echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache
1592
do_hitsets() {
    # Configure hit_set tracking for each (pool, type) pair given as
    # consecutive command-line arguments.
    until [ -z "$*" ]; do
        pool="$1" type="$2"
        shift
        shift
        debug echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset
1608
do_rgw_create_bucket()
{
    # Write and run a small boto script that creates the "nfs-bucket"
    # S3 bucket used by the NFS-on-RGW export.
    local rgw_python_file='rgw-create-bucket.py'
    cat > "$CEPH_OUT_DIR/$rgw_python_file" <<EOF
import boto
import boto.s3.connection

conn = boto.connect_s3(
        aws_access_key_id = '$s3_akey',
        aws_secret_access_key = '$s3_skey',
        host = '$HOSTNAME',
        port = 80,
        is_secure=False,
        calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )

bucket = conn.create_bucket('nfs-bucket')
print('created new bucket')
EOF
    prun python $CEPH_OUT_DIR/$rgw_python_file
}
1629
do_rgw_create_users()
{
    # Create the canonical RGW test users: the "testid" S3 user, the three
    # users expected by the s3-tests suite, and a Swift subuser; prints the
    # credentials at the end.  $s3_akey/$s3_skey stay global so that
    # do_rgw_create_bucket can reuse them.
    # Create S3 user
    s3_akey='0555b35654ad1656d804'
    s3_skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $s3_akey --secret $s3_skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com --caps="user-policy=*" -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    # tenanted user for multi-tenancy test coverage
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo " access key: $s3_akey"
    echo " secret key: $s3_skey"
    echo ""
    echo "Swift User Info:"
    echo " account : test"
    echo " user : tester"
    echo " password : testing"
    echo ""
}
1676
do_rgw()
{
    # On a fresh cluster create the RGW users (and optional zone
    # compression), then start $CEPH_NUM_RGW radosgw daemons on
    # consecutive ports beginning at $CEPH_RGW_PORT.
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            debug echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi

    if [ -n "$rgw_flight_frontend" ] ;then
        debug echo "starting arrow_flight frontend on first rgw"
    fi

    # Start server
    if [ "$cephadm" -gt 0 ]; then
        # let the orchestrator deploy rgw and return
        ceph_adm orch apply rgw rgwTest
        return
    fi

    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    fi

    # a trailing "s" on $CEPH_RGW_PORT (e.g. "443s") requests HTTPS
    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
    RGWSUDO=
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo

    current_port=$CEPH_RGW_PORT
    # allow only first rgw to start arrow_flight server/port
    local flight_conf=$rgw_flight_frontend
    # BUGFIX: the counter was never initialized, so a stale $i left over
    # from earlier code could terminate this loop prematurely.
    i=0
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"

        # create the daemon's cephx key
        ceph_adm auth get-or-create $rgw_name \
            mon 'allow rw' \
            osd 'allow rwx' \
            mgr 'allow rw' \
            >> "$keyring_fn"

        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            --rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
            ${RGWDEBUG} \
            -n ${rgw_name} \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}${flight_conf:+,arrow_flight}"

        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break

        current_port=$((current_port+1))
        unset flight_conf
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi

# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    pseudo_path="/cephfs"
    if [ "$cephadm" -gt 0 ]; then
        # cephadm path: let the orchestrator run ganesha; we only create
        # the nfs cluster and its exports
        cluster_id="vstart"
        port="2049"
        prun ceph_adm nfs cluster create $cluster_id
        if [ $CEPH_NUM_MDS -gt 0 ]; then
            prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
        if [ "$CEPH_NUM_RGW" -gt 0 ]; then
            pseudo_path="/rgw"
            do_rgw_create_bucket
            prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
    else
        # no orchestrator: spawn the ganesha daemons ourselves
        start_ganesha
        echo "Mount using: mount -t nfs -o port=<ganesha-port-num> $IP:$pseudo_path mountpoint"
    fi
fi
1767
docker_service(){
    # Run a container command ("$@") with podman if installed, otherwise
    # with a running docker; used to deploy the jaeger container below.
    local service=''
    #prefer podman
    if command -v podman > /dev/null; then
        service="podman"
    elif pgrep -f docker > /dev/null; then
        service="docker"
    fi
    if [ -n "$service" ]; then
        echo "using $service for deploying jaeger..."
        #check for exited container, remove them and restart container
        if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then
            $service rm jaeger
        fi
        # BUGFIX: query the selected runtime, not podman unconditionally --
        # when docker was chosen, podman may not even be installed.
        if [ ! "$($service ps -aq -f name=jaeger)" ]; then
            $service "$@"
        fi
    else
        echo "cannot find docker or podman, please restart service and rerun."
    fi
}
1789
echo ""
# with --jaeger, deploy the jaeger all-in-one tracing container
if [ $with_jaeger -eq 1 ]; then
    debug echo "Enabling jaegertracing..."
    docker_service run -d --name jaeger \
        -p 5775:5775/udp \
        -p 6831:6831/udp \
        -p 6832:6832/udp \
        -p 5778:5778 \
        -p 16686:16686 \
        -p 14268:14268 \
        -p 14250:14250 \
        quay.io/jaegertracing/all-in-one
fi
1803
debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."

echo ""
if [ "$new" -eq 1 ]; then
    # print the dashboard/restful endpoints and credentials collected
    # while the mgr modules were being configured
    if $with_mgr_dashboard; then
        cat <<EOF
dashboard urls: $DASH_URLS
  w/ user/pass: admin / admin
EOF
    fi
    if $with_mgr_restful; then
        cat <<EOF
restful urls: $RESTFUL_URLS
  w/ user/pass: admin / $RESTFUL_SECRET
EOF
    fi
fi

echo ""
# add header to the environment file
{
    echo "#"
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
    echo "#"
} > $CEPH_DIR/vstart_environment.sh
# write the environment itself, echoing it to the console as well (tee)
{
    # NOTE(review): there is no ':' between $CEPH_PYTHON_COMMON and
    # \$PYTHONPATH -- presumably $CEPH_PYTHON_COMMON carries its own
    # trailing separator; confirm before changing.
    echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
    echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
    echo "export PATH=$CEPH_DIR/bin:\$PATH"

    if [ "$CEPH_DIR" != "$PWD" ]; then
        echo "export CEPH_CONF=$conf_fn"
        echo "export CEPH_KEYRING=$keyring_fn"
    fi

    if [ -n "$CEPHFS_SHELL" ]; then
        echo "alias cephfs-shell=$CEPHFS_SHELL"
    fi
} | tee -a $CEPH_DIR/vstart_environment.sh

echo "CEPH_DEV=1"
1847
# always keep this section at the very bottom of this file
# Warn when a system-wide /etc/ceph/ceph.conf exists that is not the same
# file as our vstart conf -- it could shadow or conflict with it.
# ([ ... -a ... ] is deprecated/ambiguous; use chained tests instead.)
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" ] && [ -n "$conf_fn" ] && ! [ "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
    echo ""
    echo ""
    echo "WARNING:"
    echo "    Please remove stray $STRAY_CONF_PATH if not needed."
    echo "    Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "    and may lead to undesired results."
    echo ""
    echo "NOTE:"
    echo "    Remember to restart cluster after removing $STRAY_CONF_PATH"
fi

init_logrotate