2 # -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
3 # vim: softtabstop=4 shiftwidth=4 expandtab
10 if [[ "$s" =~ \
]]; then
11 printf -- "'%s' " "$s"
24 debug quoted_print
"$@" '&'
29 debug quoted_print
"$@"
34 if [ -n "$VSTART_DEST" ]; then
36 SRC_PATH
=`(cd $SRC_PATH; pwd)`
# Run everything out of $VSTART_DEST: config, device files, logs and
# admin sockets all live under the requested destination directory.
CEPH_CONF_PATH=$VSTART_DEST
CEPH_DEV_DIR=$VSTART_DEST/dev
CEPH_OUT_DIR=$VSTART_DEST/out
CEPH_ASOK_DIR=$VSTART_DEST/out
48 get_cmake_variable
() {
50 grep "${variable}:" CMakeCache.txt | cut
-d "=" -f 2
53 # for running out of the CMake build directory
54 if [ -e CMakeCache.txt
]; then
# Out of tree build, learn source location from CMakeCache.txt
CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
# Default the mgr module search path to the in-tree pybind modules
# unless the caller already provided one.
[ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
61 # use CEPH_BUILD_ROOT to vstart from a 'make install'
62 if [ -n "$CEPH_BUILD_ROOT" ]; then
# 'make install' tree: derive tool and plugin locations from
# CEPH_BUILD_ROOT unless the caller already set them.
[ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
[ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
[ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
[ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
67 # make install should install python extensions into PYTHONPATH
68 elif [ -n "$CEPH_ROOT" ]; then
# Source-tree run: default the tool, binding and library locations
# relative to CEPH_ROOT / CEPH_BUILD_DIR unless already set by caller.
[ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/cephfs-shell
[ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
[ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
[ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
[ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
[ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
[ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
[ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
80 if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
84 [ -z "$PYBIND" ] && PYBIND
=.
/pybind
86 [ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON
="$CEPH_PYTHON_COMMON:"
87 CYTHON_PYTHONPATH
="$CEPH_LIB/cython_modules/lib.3"
88 export PYTHONPATH
=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH
90 export LD_LIBRARY_PATH
=$CEPH_LIB:$LD_LIBRARY_PATH
91 export DYLD_LIBRARY_PATH
=$CEPH_LIB:$DYLD_LIBRARY_PATH
92 # Suppress logging for regular use that indicated that we are using a
93 # development version. vstart.sh is only used during testing and
# Let the short convenience env vars (MON=3 OSD=1 ...) feed the
# canonical CEPH_NUM_* variables when those are not set explicitly.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"
105 # if none of the CEPH_NUM_* number is specified, kill the existing
107 if [ -z "$CEPH_NUM_MON" -a \
108 -z "$CEPH_NUM_OSD" -a \
109 -z "$CEPH_NUM_MDS" -a \
110 -z "$CEPH_NUM_MGR" -a \
111 -z "$GANESHA_DAEMON_NUM" ]; then
# Final fallback daemon counts when neither CEPH_NUM_* nor the short
# env vars were supplied: a 3-mon / 3-osd / 3-mds dev cluster.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0
# Default cluster directories and the rgw port; everything lives under
# the current working directory unless overridden.
[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
132 if [ $CEPH_NUM_OSD -gt 3 ]; then
133 OSD_POOL_DEFAULT_SIZE
=3
135 OSD_POOL_DEFAULT_SIZE
=$CEPH_NUM_OSD
cephx=1 # turn cephx on by default
155 if [ `uname` = FreeBSD
]; then
156 objectstore
="filestore"
158 objectstore
="bluestore"
lockdep=${LOCKDEP:-1} # lockdep on unless LOCKDEP says otherwise
spdk_enabled=0 # disable SPDK by default
# Assume the dashboard is available until the build checks prove otherwise.
with_mgr_dashboard=true
168 if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
169 [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
170 debug
echo "ceph-mgr dashboard not built - disabling."
171 with_mgr_dashboard
=false
# Config section vstart writes its bookkeeping (daemon counts) into.
VSTART_SEC="client.vstart.sh"

# Cluster conf/keyring live under CEPH_CONF_PATH; the scratch osd/mon
# maps are per-process temp files ($$ keeps concurrent runs apart).
conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
# Build the --help text; \n and \t sequences are expanded later when the
# string is printed (with echo -e / printf).
usage="usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
usage=$usage"\t-i <ip>: bind to specific ip\n"
usage=$usage"\t-n, --new\n"
usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage=$usage"\t--redirect-output: only useful with nodaemon, directs output to log file\n"
usage=$usage"\t--smallmds: limit mds cache memory limit\n"
usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
usage=$usage"\t-k keep old configuration files (default)\n"
usage=$usage"\t-x enable cephx (on by default)\n"
usage=$usage"\t-X disable cephx\n"
usage=$usage"\t-g --gssapi enable Kerberos/GSSApi authentication\n"
usage=$usage"\t-G disable Kerberos/GSSApi authentication\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e : create an erasure pool\n";
usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
usage=$usage"\t--rgw_compression specify the rgw compression plugin\n"
usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend (default)\n"
usage=$usage"\t-f, --filestore use filestore as the osd objectstore backend\n"
usage=$usage"\t-K, --kstore use kstore as the osd objectstore backend\n"
usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
usage=$usage"\t--nolockdep disable lockdep\n"
usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
usage=$usage"\t--without-dashboard: do not run using mgr dashboard\n"
usage=$usage"\t--bluestore-spdk <vendor>:<device>: enable SPDK and specify the PCI-ID of the NVME device\n"
usage=$usage"\t--msgr1: use msgr1 only\n"
usage=$usage"\t--msgr2: use msgr2 only\n"
usage=$usage"\t--msgr21: use msgr2 and msgr1\n"
usage=$usage"\t--crimson: use crimson-osd instead of ceph-osd\n"
usage=$usage"\t--osd-args: specify any extra osd specific options\n"
usage=$usage"\t--bluestore-devs: comma-separated list of blockdevs to use for bluestore\n"
usage=$usage"\t--inc-osd: append some more osds into existing vcluster\n"
usage=$usage"\t--cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]\n"
usage=$usage"\t--no-parallel: dont start all OSDs in parallel\n"
240 while [ $# -ge 1 ]; do
252 [ -z "$2" ] && usage_exit
266 if [ "$inc_osd_num" == "" ]; then
298 [ -z "$2" ] && usage_exit
307 [ -z "$2" ] && usage_exit
312 [ -z "$2" ] && usage_exit
317 [ -z "$2" ] && usage_exit
322 [ -z "$2" ] && usage_exit
327 [ -z "$2" ] && usage_exit
361 [ -z "$2" ] && usage_exit
366 cephx
=1 # this is on be default, flag exists for historical consistency
380 if [ ! -r $conf_fn ]; then
381 echo "cannot use old configuration: $conf_fn not readable." >&2
387 objectstore
="memstore"
390 objectstore
="bluestore"
393 objectstore
="filestore"
399 hitset
="$hitset $2 $3"
404 extra_conf
="$extra_conf $2
409 if [ -z "$cache" ]; then
424 with_mgr_dashboard
=false
427 [ -z "$2" ] && usage_exit
433 IFS
=',' read -r -a bluestore_dev
<<< "$2"
434 for dev
in "${bluestore_dev[@]}"; do
435 if [ ! -b $dev -o ! -w $dev ]; then
436 echo "All --bluestore-devs must refer to writable block devices"
448 if [ $kill_all -eq 1 ]; then
449 $SUDO $INIT_CEPH stop
452 if [ "$new" -eq 0 ]; then
453 if [ -z "$CEPH_ASOK_DIR" ]; then
454 CEPH_ASOK_DIR
=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
456 mkdir
-p $CEPH_ASOK_DIR
457 MON
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
459 OSD
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
461 MDS
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
463 MGR
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
465 RGW
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
467 NFS
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
468 GANESHA_DAEMON_NUM
="$NFS"
471 if [ -e "$conf_fn" ]; then
472 asok_dir
=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
474 if [ $asok_dir != /var
/run
/ceph
]; then
475 [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
478 if [ -z "$CEPH_ASOK_DIR" ]; then
479 CEPH_ASOK_DIR
=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
490 eval "valg=\$valgrind_$type"
491 [ -z "$valg" ] && valg
="$valgrind"
493 if [ -n "$valg" ]; then
494 prunb valgrind
--tool="$valg" $valgrind_args "$@" -f
497 if [ "$nodaemon" -eq 0 ]; then
499 elif [ "$redirect" -eq 0 ]; then
500 prunb
${CEPH_ROOT}/src
/ceph-run
"$@" -f
502 ( prunb
${CEPH_ROOT}/src
/ceph-run
"$@" -f ) >$CEPH_OUT_DIR/$type.
$num.stdout
2>&1
508 if [ "$new" -eq 1 -o "$overwrite_conf" -eq 1 ]; then
515 lspci
-mm -n -D -d $pci_id | cut
-d ' ' -f 1 |
sed -n $which_pci'p'
518 get_pci_selector_num
() {
519 lspci
-mm -n -D -d $pci_id | cut
-d' ' -f 1 |
wc -l
524 if [ $CEPH_NUM_RGW -eq 0 ]; then
528 # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
529 # individual rgw's ids will be their ports.
530 current_port
=$CEPH_RGW_PORT
531 for n
in $
(seq 1 $CEPH_NUM_RGW); do
533 [client.rgw.${current_port}]
534 rgw frontends = $rgw_frontend port=${current_port}
535 admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
537 current_port
=$
((current_port
+ 1))
544 log file = $CEPH_OUT_DIR/\$name.log
545 admin socket = $CEPH_ASOK_DIR/\$name.asok
547 pid file = $CEPH_OUT_DIR/\$name.pid
548 heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
551 local mgr_modules
="restful iostat"
552 if $with_mgr_dashboard; then
553 mgr_modules
="dashboard $mgr_modules"
557 if [ $msgr -eq 21 ]; then
563 if [ $msgr -eq 2 ]; then
566 ms bind msgr1 = false
569 if [ $msgr -eq 1 ]; then
571 ms bind msgr2 = false
577 ; generated by vstart.sh on `date`
579 num mon = $CEPH_NUM_MON
580 num osd = $CEPH_NUM_OSD
581 num mds = $CEPH_NUM_MDS
582 num mgr = $CEPH_NUM_MGR
583 num rgw = $CEPH_NUM_RGW
584 num ganesha = $GANESHA_DAEMON_NUM
588 osd failsafe full ratio = .99
589 mon osd full ratio = .99
590 mon osd nearfull ratio = .99
591 mon osd backfillfull ratio = .99
592 mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
593 erasure code dir = $EC_PATH
594 plugin dir = $CEPH_LIB
595 filestore fd cache size = 32
596 run dir = $CEPH_OUT_DIR
597 crash dir = $CEPH_OUT_DIR
598 enable experimental unrecoverable data corrupting features = *
599 osd_crush_chooseleaf_type = 0
600 debug asok assert abort = true
604 if [ "$lockdep" -eq 1 ] ; then
609 if [ "$cephx" -eq 1 ] ; then
611 auth cluster required = cephx
612 auth service required = cephx
613 auth client required = cephx
615 elif [ "$gssapi_authx" -eq 1 ] ; then
617 auth cluster required = gss
618 auth service required = gss
619 auth client required = gss
620 gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
624 auth cluster required = none
625 auth service required = none
626 auth client required = none
629 if [ "$short" -eq 1 ]; then
630 COSDSHORT
=" osd max object name len = 460
631 osd max object namespace len = 64"
633 if [ "$objectstore" == "bluestore" ]; then
634 if [ "$spdk_enabled" -eq 1 ]; then
635 if [ "$(get_pci_selector_num)" -eq 0 ]; then
636 echo "Not find the specified NVME device, please check." >&2
639 if [ $
(get_pci_selector_num
) -lt $CEPH_NUM_OSD ]; then
640 echo "OSD number ($CEPH_NUM_OSD) is greater than NVME SSD number ($(get_pci_selector_num)), please check." >&2
643 BLUESTORE_OPTS
=" bluestore_block_db_path = \"\"
644 bluestore_block_db_size = 0
645 bluestore_block_db_create = false
646 bluestore_block_wal_path = \"\"
647 bluestore_block_wal_size = 0
648 bluestore_block_wal_create = false
649 bluestore_spdk_mem = 2048"
651 BLUESTORE_OPTS
=" bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
652 bluestore block db size = 1073741824
653 bluestore block db create = true
654 bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
655 bluestore block wal size = 1048576000
656 bluestore block wal create = true"
661 keyring = $keyring_fn
662 log file = $CEPH_OUT_DIR/\$name.\$pid.log
663 admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok
666 rgw crypt s3 kms backend = testing
667 rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
668 rgw crypt require ssl = false
669 ; uncomment the following to set LC days as the value in seconds;
670 ; needed for passing lc time based s3-tests (can be verbose)
671 ; rgw lc debug interval = 10
686 mds data = $CEPH_DEV_DIR/mds.\$id
687 mds root ino uid = `id -u`
688 mds root ino gid = `id -g`
691 mgr data = $CEPH_DEV_DIR/mgr.\$id
692 mgr module path = $MGR_PYTHON_PATH
693 cephadm path = $CEPH_ROOT/src/cephadm/cephadm
698 osd_check_max_object_name_len_on_startup = false
699 osd data = $CEPH_DEV_DIR/osd\$id
700 osd journal = $CEPH_DEV_DIR/osd\$id/journal
701 osd journal size = 100
703 osd class dir = $OBJCLASS_PATH
704 osd class load list = *
705 osd class default list = *
706 osd fast shutdown = false
708 filestore wbthrottle xfs ios start flusher = 10
709 filestore wbthrottle xfs ios hard limit = 20
710 filestore wbthrottle xfs inodes hard limit = 30
711 filestore wbthrottle btrfs ios start flusher = 10
712 filestore wbthrottle btrfs ios hard limit = 20
713 filestore wbthrottle btrfs inodes hard limit = 30
714 bluestore fsck on mount = true
715 bluestore block create = true
719 kstore fsck on mount = true
720 osd objectstore = $objectstore
724 mgr initial modules = $mgr_modules
728 mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
729 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
736 for f
in a b c d e f g h i j k l m n o p q r s t u v w x y z
738 [ $count -eq $CEPH_NUM_MON ] && break;
739 count
=$
(($count + 1))
740 if [ -z "$MONS" ]; then
747 if [ "$new" -eq 1 ]; then
748 if [ `echo $IP | grep '^127\\.'` ]; then
750 echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
751 echo " connect. either adjust /etc/hosts, or edit this script to use your"
752 echo " machine's real IP."
756 prun
$SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon.
"$keyring_fn" --cap mon
'allow *'
757 prun
$SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
758 --cap mon
'allow *' \
759 --cap osd
'allow *' \
760 --cap mds
'allow *' \
761 --cap mgr
'allow *' \
764 # build a fresh fs monmap, mon fs
770 if [ $msgr -eq 1 ]; then
771 A
="v1:$IP:$(($CEPH_PORT+$count+1))"
773 if [ $msgr -eq 2 ]; then
774 A
="v2:$IP:$(($CEPH_PORT+$count+1))"
776 if [ $msgr -eq 21 ]; then
777 A
="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
779 params
+=("--addv" "$f" "$A")
780 mon_host
="$mon_host $A"
784 mon data = $CEPH_DEV_DIR/mon.$f
786 count
=$
(($count + 2))
792 prun
"$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"
796 prun
rm -rf -- "$CEPH_DEV_DIR/mon.$f"
797 prun mkdir
-p "$CEPH_DEV_DIR/mon.$f"
798 prun
"$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
801 prun
rm -- "$monmap_fn"
807 run
'mon' $f $CEPH_BIN/ceph-mon
-i $f $ARGS $CMON_ARGS
812 if [ $inc_osd_num -gt 0 ]; then
813 old_maxosd
=$
($CEPH_BIN/ceph osd getmaxosd |
sed -e 's/max_osd = //' -e 's/ in epoch.*//')
815 end
=$
(($start-1+$inc_osd_num))
816 overwrite_conf
=1 # fake wconf
819 end
=$
(($CEPH_NUM_OSD-1))
822 for osd
in `seq $start $end`
824 local extra_seastar_args
825 if [ "$ceph_osd" == "crimson-osd" ]; then
826 # designate a single CPU node $osd for osd.$osd
827 extra_seastar_args
="--smp 1 --cpuset $osd"
828 if [ "$debug" -ne 0 ]; then
829 extra_seastar_args
+=" --debug"
832 if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
837 if [ "$spdk_enabled" -eq 1 ]; then
839 bluestore_block_path = spdk:$(get_pci_selector $((osd+1)))
843 rm -rf $CEPH_DEV_DIR/osd
$osd || true
844 if command -v btrfs
> /dev
/null
; then
845 for f
in $CEPH_DEV_DIR/osd
$osd/*; do btrfs sub delete
$f &> /dev
/null || true
; done
847 if [ -n "$filestore_path" ]; then
848 ln -s $filestore_path $CEPH_DEV_DIR/osd
$osd
849 elif [ -n "$kstore_path" ]; then
850 ln -s $kstore_path $CEPH_DEV_DIR/osd
$osd
852 mkdir
-p $CEPH_DEV_DIR/osd
$osd
853 if [ -n "${bluestore_dev[$osd]}" ]; then
854 dd if=/dev
/zero of
=${bluestore_dev[$osd]} bs
=1M count
=1
855 ln -s ${bluestore_dev[$osd]} $CEPH_DEV_DIR/osd
$osd/block
857 bluestore fsck on mount = false
863 echo "add osd$osd $uuid"
864 OSD_SECRET
=$
($CEPH_BIN/ceph-authtool
--gen-print-key)
865 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd
$osd/new.json
866 ceph_adm osd new
$uuid -i $CEPH_DEV_DIR/osd
$osd/new.json
867 rm $CEPH_DEV_DIR/osd
$osd/new.json
868 $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args
870 local key_fn
=$CEPH_DEV_DIR/osd
$osd/keyring
878 run
'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
879 $extra_seastar_args $extra_osd_args \
880 -i $osd $ARGS $COSD_ARGS &
889 for p
in $osds_wait; do
892 debug
echo OSDs started
894 if [ $inc_osd_num -gt 0 ]; then
896 new_maxosd
=$
($CEPH_BIN/ceph osd getmaxosd |
sed -e 's/max_osd = //' -e 's/ in epoch.*//')
897 sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
903 local ssl
=${DASHBOARD_SSL:-1}
904 # avoid monitors on nearby ports (which test/*.sh use extensively)
905 MGR_PORT
=$
(($CEPH_PORT + 1000))
907 for name
in x y z a b c d e f g h i j k l m n o p
909 [ $mgr -eq $CEPH_NUM_MGR ] && break
911 if [ "$new" -eq 1 ]; then
912 mkdir
-p $CEPH_DEV_DIR/mgr.
$name
913 key_fn
=$CEPH_DEV_DIR/mgr.
$name/keyring
914 $SUDO $CEPH_BIN/ceph-authtool
--create-keyring --gen-key --name=mgr.
$name $key_fn
915 ceph_adm
-i $key_fn auth add mgr.
$name mon
'allow profile mgr' mds
'allow *' osd
'allow *'
922 if $with_mgr_dashboard ; then
923 local port_option
="ssl_server_port"
924 local http_proto
="https"
925 if [ "$ssl" == "0" ]; then
926 port_option
="server_port"
928 ceph_adm config
set mgr mgr
/dashboard
/ssl false
--force
930 ceph_adm config
set mgr mgr
/dashboard
/$name/$port_option $MGR_PORT --force
931 if [ $mgr -eq 1 ]; then
932 DASH_URLS
="$http_proto://$IP:$MGR_PORT"
934 DASH_URLS
+=", $http_proto://$IP:$MGR_PORT"
937 MGR_PORT
=$
(($MGR_PORT + 1000))
938 ceph_adm config
set mgr mgr
/prometheus
/$name/server_port
$PROMETHEUS_PORT --force
939 PROMETHEUS_PORT
=$
(($PROMETHEUS_PORT + 1000))
941 ceph_adm config
set mgr mgr
/restful
/$name/server_port
$MGR_PORT --force
942 if [ $mgr -eq 1 ]; then
943 RESTFUL_URLS
="https://$IP:$MGR_PORT"
945 RESTFUL_URLS
+=", https://$IP:$MGR_PORT"
947 MGR_PORT
=$
(($MGR_PORT + 1000))
950 debug
echo "Starting mgr.${name}"
951 run
'mgr' $name $CEPH_BIN/ceph-mgr
-i $name $ARGS
954 if [ "$new" -eq 1 ]; then
955 # setting login credentials for dashboard
956 if $with_mgr_dashboard; then
957 while ! ceph_adm
-h |
grep -c -q ^dashboard
; do
958 debug
echo 'waiting for mgr dashboard module to start'
961 ceph_adm dashboard ac-user-create
--force-password admin admin administrator
962 if [ "$ssl" != "0" ]; then
963 if ! ceph_adm dashboard create-self-signed-cert
; then
964 debug
echo dashboard module not working correctly
!
969 while ! ceph_adm
-h |
grep -c -q ^restful
; do
970 debug
echo 'waiting for mgr restful module to start'
973 if ceph_adm restful create-self-signed-cert
; then
975 ceph_adm restful create-key admin
-o $SF
976 RESTFUL_SECRET
=`cat $SF`
979 debug
echo MGR Restful is not working
, perhaps the package is not installed?
983 if [ "$cephadm" -eq 1 ]; then
984 debug
echo Enabling cephadm orchestrator
985 if [ "$new" -eq 1 ]; then
987 https
://registry.hub.docker.com
/v
2/repositories
/ceph
/daemon-base
/tags
/latest-master-devel \
988 | jq
-r '.images[].digest')
989 ceph_adm config
set global container_image
"docker.io/ceph/daemon-base@$digest"
991 ceph_adm config-key
set mgr
/cephadm
/ssh_identity_key
-i ~
/.ssh
/id_rsa
992 ceph_adm config-key
set mgr
/cephadm
/ssh_identity_pub
-i ~
/.ssh
/id_rsa.pub
993 ceph_adm mgr module
enable cephadm
994 ceph_adm orch
set backend cephadm
995 ceph_adm orch
host add
"$(hostname)"
996 ceph_adm orch apply crash
'*'
997 ceph_adm config
set mgr mgr
/cephadm
/allow_ptrace true
1003 for name
in a b c d e f g h i j k l m n o p
1005 [ $mds -eq $CEPH_NUM_MDS ] && break
1008 if [ "$new" -eq 1 ]; then
1009 prun mkdir
-p "$CEPH_DEV_DIR/mds.$name"
1010 key_fn
=$CEPH_DEV_DIR/mds.
$name/keyring
1015 if [ "$standby" -eq 1 ]; then
1016 mkdir
-p $CEPH_DEV_DIR/mds.
${name}s
1018 mds standby for rank = $mds
1020 mds standby replay = true
1021 mds standby for name = ${name}
1024 prun
$SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
1025 ceph_adm
-i "$key_fn" auth add
"mds.$name" mon
'allow profile mds' osd
'allow rw tag cephfs *=*' mds
'allow' mgr
'allow profile mds'
1026 if [ "$standby" -eq 1 ]; then
1027 prun
$SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
1028 "$CEPH_DEV_DIR/mds.${name}s/keyring"
1029 ceph_adm
-i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add
"mds.${name}s" \
1030 mon
'allow profile mds' osd
'allow *' mds
'allow' mgr
'allow profile mds'
1034 run
'mds' $name $CEPH_BIN/ceph-mds
-i $name $ARGS $CMDS_ARGS
1035 if [ "$standby" -eq 1 ]; then
1036 run
'mds' $name $CEPH_BIN/ceph-mds
-i ${name}s
$ARGS $CMDS_ARGS
1039 #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
1040 #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
1041 #ceph_adm mds set max_mds 2
1044 if [ $new -eq 1 ]; then
1045 if [ "$CEPH_NUM_FS" -gt "0" ] ; then
1046 sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
1047 if [ "$CEPH_NUM_FS" -gt "1" ] ; then
1048 ceph_adm fs flag
set enable_multiple true
--yes-i-really-mean-it
1051 # wait for volume module to load
1052 while ! ceph_adm fs volume
ls ; do sleep 1 ; done
1054 for name
in a b c d e f g h i j k l m n o p
1056 ceph_adm fs volume create
${name}
1057 ceph_adm fs authorize
${name} "client.fs_${name}" / rwp
>> "$keyring_fn"
1059 [ $fs -eq $CEPH_NUM_FS ] && break
1066 # Ganesha Daemons requires nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace
1067 # nfs-ganesha-rados-urls (version 3.3 and above) packages installed. On
1068 # Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8
1069 # the packages are available at
1070 # https://wiki.centos.org/SpecialInterestGroup/Storage
1071 # Similarly for Ubuntu>=16.04 follow the instructions on
1072 # https://launchpad.net/~nfs-ganesha
1076 GANESHA_PORT
=$
(($CEPH_PORT + 4000))
# Per-cluster ganesha identity: a dedicated cephx user, the shared
# RADOS pool, a namespace keyed by the cluster id, and the rados:// URL
# where this daemon's NFS export config object is stored.
test_user="ganesha-$cluster_id"
pool_name="nfs-ganesha"
namespace=$cluster_id
url="rados://$pool_name/$namespace/conf-nfs.$test_user"
1083 prun ceph_adm auth get-or-create client.
$test_user \
1085 osd
"allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
1086 mds
"allow rw path=/" \
1089 ceph_adm mgr module
enable test_orchestrator
1090 ceph_adm orch
set backend test_orchestrator
1091 ceph_adm test_orchestrator load_data
-i $CEPH_ROOT/src
/pybind
/mgr
/test_orchestrator
/dummy_data.json
1092 prun ceph_adm nfs cluster create cephfs
$cluster_id
1093 prun ceph_adm nfs
export create cephfs
"a" $cluster_id "/cephfs"
1095 for name
in a b c d e f g h i j k l m n o p
1097 [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break
1099 port
=$
(($GANESHA_PORT + ganesha
))
1100 ganesha
=$
(($ganesha + 1))
1101 ganesha_dir
="$CEPH_DEV_DIR/ganesha.$name"
1102 prun
rm -rf $ganesha_dir
1103 prun mkdir
-p $ganesha_dir
1105 echo "NFS_CORE_PARAM {
1107 Enable_RQUOTA = false;
1117 RecoveryBackend = rados_cluster;
1118 Minor_Versions = 1, 2;
1125 namespace = $namespace;
1126 UserId = $test_user;
1131 Userid = $test_user;
1132 watch_url = \"$url\";
1133 }" > "$ganesha_dir/ganesha-$name.conf"
1139 ganesha data = $ganesha_dir
1140 pid file = $ganesha_dir/ganesha-$name.pid
1143 prun env CEPH_CONF
="${conf_fn}" ganesha-rados-grace
--userid $test_user -p $pool_name -n $namespace add
$name
1144 prun env CEPH_CONF
="${conf_fn}" ganesha-rados-grace
--userid $test_user -p $pool_name -n $namespace
1146 prun env CEPH_CONF
="${conf_fn}" ganesha.nfsd
-L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG
1148 # Wait few seconds for grace period to be removed
1151 prun env CEPH_CONF
="${conf_fn}" ganesha-rados-grace
--userid $test_user -p $pool_name -n $namespace
1153 if $with_mgr_dashboard; then
1154 $CEPH_BIN/rados
-p $pool_name put
"conf-$name" "$ganesha_dir/ganesha-$name.conf"
1157 echo "$test_user ganesha daemon $name started on port: $port"
1160 if $with_mgr_dashboard; then
1161 ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace
$pool_name
1165 if [ "$debug" -eq 0 ]; then
1170 debug
echo "** going verbose **"
1179 if [ -n "$MON_ADDR" ]; then
1180 CMON_ARGS
=" -m "$MON_ADDR
1181 COSD_ARGS
=" -m "$MON_ADDR
1182 CMDS_ARGS
=" -m "$MON_ADDR
1185 if [ -z "$CEPH_PORT" ]; then
1188 CEPH_PORT
="$(echo $(( RANDOM % 1000 + 40000 )))"
1189 ss
-a -n |
egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev
/null
2>&1 ||
break
1193 [ -z "$INIT_CEPH" ] && INIT_CEPH
=$CEPH_BIN/init-ceph
1196 [ -d $CEPH_DEV_DIR/osd
0/.
] && [ -e $CEPH_DEV_DIR/sudo
] && SUDO
="sudo"
1198 if [ $inc_osd_num -eq 0 ]; then
1199 prun
$SUDO rm -f core
*
1202 [ -d $CEPH_ASOK_DIR ] || mkdir
-p $CEPH_ASOK_DIR
1203 [ -d $CEPH_OUT_DIR ] || mkdir
-p $CEPH_OUT_DIR
1204 [ -d $CEPH_DEV_DIR ] || mkdir
-p $CEPH_DEV_DIR
1205 if [ $inc_osd_num -eq 0 ]; then
1206 $SUDO rm -rf $CEPH_OUT_DIR/*
1208 [ -d gmon
] && $SUDO rm -rf gmon
/*
1210 [ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn
# figure machine's ip
HOSTNAME=$(hostname -s)
1215 if [ -n "$ip" ]; then
1218 echo hostname
$HOSTNAME
1219 if [ -x "$(which ip 2>/dev/null)" ]; then
1224 # filter out IPv4 and localhost addresses
1225 IP
="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
1226 # if nothing left, try using localhost address, it might work
1227 if [ -z "$IP" ]; then IP
="127.0.0.1"; fi
1230 echo "port $CEPH_PORT"
1233 [ -z $CEPH_ADM ] && CEPH_ADM
=$CEPH_BIN/ceph
1236 if [ "$cephx" -eq 1 ]; then
1237 prun
$SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
1239 prun
$SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
1243 if [ $inc_osd_num -gt 0 ]; then
1248 if [ "$new" -eq 1 ]; then
1252 if [ $CEPH_NUM_MON -gt 0 ]; then
1255 debug
echo Populating config ...
1256 cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
1258 osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
1259 osd_pool_default_min_size = 1
1262 mon_osd_reporter_subtree_level = osd
1263 mon_data_avail_warn = 2
1264 mon_data_avail_crit = 1
1265 mon_allow_pool_delete = true
1268 osd_scrub_load_threshold = 2000
1269 osd_debug_op_order = true
1270 osd_debug_misdirected_ops = true
1271 osd_copyfrom_max_chunk = 524288
1274 mds_debug_frag = true
1275 mds_debug_auth_pins = true
1276 mds_debug_subtrees = true
1279 mgr/telemetry/nag = false
1280 mgr/telemetry/enable = false
1284 if [ "$debug" -ne 0 ]; then
1285 debug
echo Setting debug configs ...
1286 cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
1300 debug_filestore = 20
1301 debug_bluestore = 20
1313 mds_debug_scatterstat = true
1314 mds_verify_scatter = true
1317 if [ "$cephadm" -gt 0 ]; then
1318 debug
echo Setting mon public_network ...
1319 public_network
=$
(ip route list |
grep -w "$IP" |
awk '{print $1}')
1320 ceph_adm config
set mon public_network
$public_network
1324 if [ $CEPH_NUM_MGR -gt 0 ]; then
1329 if [ $CEPH_NUM_OSD -gt 0 ]; then
1334 if [ "$smallmds" -eq 1 ]; then
1337 mds log max segments = 2
1338 # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
1339 mds cache memory limit = 100M
1343 if [ $CEPH_NUM_MDS -gt 0 ]; then
1345 # key with access to all FS
1346 ceph_adm fs authorize \
* "client.fs" / rwp
>> "$keyring_fn"
1349 # Don't set max_mds until all the daemons are started, otherwise
1350 # the intended standbys might end up in active roles.
1351 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
1352 sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
1355 for name
in a b c d e f g h i j k l m n o p
1357 [ $fs -eq $CEPH_NUM_FS ] && break
1359 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
1360 ceph_adm fs
set "${name}" max_mds
"$CEPH_MAX_MDS"
1366 if [ "$ec" -eq 1 ]; then
1368 osd erasure-code-profile set ec-profile m=2 k=2
1369 osd pool create ec erasure ec-profile
1374 if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
1375 pseudo_path
="/cephfs"
1376 if [ "$cephadm" -gt 0 ]; then
1378 prun ceph_adm nfs cluster create cephfs
$cluster_id
1379 prun ceph_adm nfs
export create cephfs
"a" $cluster_id $pseudo_path
1383 port
="<ganesha-port-num>"
1385 echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
1389 while [ -n "$*" ]; do
1392 debug
echo "creating cache for pool $p ..."
1394 osd pool create ${p}-cache
1395 osd tier add $p ${p}-cache
1396 osd tier cache-mode ${p}-cache writeback
1397 osd tier set-overlay $p ${p}-cache
1404 while [ -n "$*" ]; do
1409 debug
echo "setting hit_set on pool $pool type $type ..."
1411 osd pool set $pool hit_set_type $type
1412 osd pool set $pool hit_set_count 8
1413 osd pool set $pool hit_set_period 30
1419 do_rgw_create_users
()
1422 local akey
='0555b35654ad1656d804'
1423 local skey
='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
1424 debug
echo "setting up user testid"
1425 $CEPH_BIN/radosgw-admin user create
--uid testid
--access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com
-c $conf_fn > /dev
/null
1427 # Create S3-test users
1428 # See: https://github.com/ceph/s3-tests
1429 debug
echo "setting up s3-test users"
1430 $CEPH_BIN/radosgw-admin user create \
1431 --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
1432 --access-key ABCDEFGHIJKLMNOPQRST \
1433 --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
1434 --display-name youruseridhere \
1435 --email s3@example.com
-c $conf_fn > /dev
/null
1436 $CEPH_BIN/radosgw-admin user create \
1437 --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
1438 --access-key NOPQRSTUVWXYZABCDEFG \
1439 --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
1440 --display-name john.doe \
1441 --email john.doe@example.com
-c $conf_fn > /dev
/null
1442 $CEPH_BIN/radosgw-admin user create \
1444 --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
1445 --access-key HIJKLMNOPQRSTUVWXYZA \
1446 --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
1447 --display-name tenanteduser \
1448 --email tenanteduser@example.com
-c $conf_fn > /dev
/null
1451 debug
echo "setting up user tester"
1452 $CEPH_BIN/radosgw-admin user create
-c $conf_fn --subuser=test:tester
--display-name=Tester-Subuser
--key-type=swift
--secret=testing
--access=full
> /dev
/null
1455 echo "S3 User Info:"
1456 echo " access key: $akey"
1457 echo " secret key: $skey"
1459 echo "Swift User Info:"
1460 echo " account : test"
1461 echo " user : tester"
1462 echo " password : testing"
1468 if [ "$new" -eq 1 ]; then
1470 if [ -n "$rgw_compression" ]; then
1471 debug
echo "setting compression type=$rgw_compression"
1472 $CEPH_BIN/radosgw-admin zone placement modify
-c $conf_fn --rgw-zone=default
--placement-id=default-placement
--compression=$rgw_compression > /dev
/null
1477 if [ "$debug" -ne 0 ]; then
1478 RGWDEBUG
="--debug-rgw=20 --debug-ms=1"
1481 local CEPH_RGW_PORT_NUM
="${CEPH_RGW_PORT}"
1482 local CEPH_RGW_HTTPS
="${CEPH_RGW_PORT: -1}"
1483 if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
1484 CEPH_RGW_PORT_NUM
="${CEPH_RGW_PORT::-1}"
1489 [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO
=sudo
1491 current_port
=$CEPH_RGW_PORT
1492 for n
in $
(seq 1 $CEPH_NUM_RGW); do
1493 rgw_name
="client.rgw.${current_port}"
1495 ceph_adm auth get-or-create
$rgw_name \
1501 debug
echo start rgw on http
${CEPH_RGW_HTTPS}://localhost
:${current_port}
1502 run
'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw
-c $conf_fn \
1503 --log-file=${CEPH_OUT_DIR}/radosgw.
${current_port}.log \
1504 --admin-socket=${CEPH_OUT_DIR}/radosgw.
${current_port}.asok \
1505 --pid-file=${CEPH_OUT_DIR}/radosgw.
${current_port}.pid \
1508 "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}"
1511 [ $i -eq $CEPH_NUM_RGW ] && break
1513 current_port
=$
((current_port
+1))
1516 if [ "$CEPH_NUM_RGW" -gt 0 ]; then
1520 debug
echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."
1523 if [ "$new" -eq 1 ]; then
1524 if $with_mgr_dashboard; then
1525 echo "dashboard urls: $DASH_URLS"
1526 echo " w/ user/pass: admin / admin"
1528 echo "restful urls: $RESTFUL_URLS"
1529 echo " w/ user/pass: admin / $RESTFUL_SECRET"
1533 # add header to the environment file
1536 echo "# source this file into your shell to set up the environment."
1537 echo "# For example:"
1538 echo "# $ . $CEPH_DIR/vstart_environment.sh"
1540 } > $CEPH_DIR/vstart_environment.sh
1542 echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
1543 echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
1545 if [ "$CEPH_DIR" != "$PWD" ]; then
1546 echo "export CEPH_CONF=$conf_fn"
1547 echo "export CEPH_KEYRING=$keyring_fn"
1550 if [ -n "$CEPHFS_SHELL" ]; then
1551 echo "alias cephfs-shell=$CEPHFS_SHELL"
1553 } |
tee -a $CEPH_DIR/vstart_environment.sh
# always keep this section at the very bottom of this file
# System-wide conf that could shadow the vstart one; warn if present.
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
1559 if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
1563 echo " Please remove stray $STRAY_CONF_PATH if not needed."
1564 echo " Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
1565 echo " and may lead to undesired results."
1568 echo " Remember to restart cluster after removing $STRAY_CONF_PATH"