# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab
        if [[ "$s" =~ \  ]]; then
            printf -- "'%s' " "$s"

    debug quoted_print "$@" '&'
    PATH=$CEPH_BIN:$PATH "$@" &

    debug quoted_print "$@"
    PATH=$CEPH_BIN:$PATH "$@"
if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_BIN=${CEPH_BIN:-${PWD}/bin}
    CEPH_LIB=${CEPH_LIB:-${PWD}/lib}

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/asok
    CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}
fi
get_cmake_variable() {
    local variable=$1
    grep "${variable}:" CMakeCache.txt | cut -d "=" -f 2
}
# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_ROOT/external/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
    # make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
    [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/shell/cephfs-shell
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_DIR/external/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
    [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
if [ -z "${CEPH_VSTART_WRAPPER}" ]; then

[ -z "$PYBIND" ] && PYBIND=./pybind

[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH

export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
# Suppress logging for regular use that indicates that we are using a
# development version. vstart.sh is only used during testing and
# development
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"
# if none of the CEPH_NUM_* counts is specified, kill the existing cluster
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" -a \
     -z "$GANESHA_DAEMON_NUM" ]; then
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0
[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_ASOK_DIR" ] && CEPH_ASOK_DIR="$CEPH_DIR/asok"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}
if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
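
# Worked example of the default pool size above: with CEPH_NUM_OSD=2 the
# replicated pool size is 2; with CEPH_NUM_OSD=8 it is capped at 3.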
cephx=1 # turn cephx on by default
if [ `uname` = FreeBSD ]; then
    objectstore="memstore"
else
    objectstore="bluestore"
fi
lockdep=${LOCKDEP:-1}
spdk_enabled=0 # disable SPDK by default

with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
   [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi
with_mgr_restful=false
declare -a block_devs
declare -a secondary_block_devs
secondary_block_devs_type="SSD"

VSTART_SEC="client.vstart.sh"

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
monmap_fn="/tmp/ceph_monmap.$$"
read -r -d '' usage <<EOF || true
usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d
  -s, --standby_mds: Generate standby-replay MDS for each active
  -l, --localhost: use localhost instead of hostname
  -i <ip>: bind to specific ip
  --valgrind[_{osd,mds,mon,rgw}] 'toolname args...'
  --nodaemon: use ceph-run as wrapper for mon/osd/mds
  --redirect-output: only useful with nodaemon, directs output to log file
  --smallmds: limit mds cache memory limit
  -m ip:port specify monitor address
  -k keep old configuration files (default)
  -x enable cephx (on by default)
  -g --gssapi enable Kerberos/GSSApi authentication
  -G disable Kerberos/GSSApi authentication
  --hitset <pool> <hit_set_type>: enable hitset tracking
  -e: create an erasure pool
  -o config add extra config parameters to all sections
  --rgw_port specify ceph rgw http listen port
  --rgw_frontend specify the rgw frontend configuration
  --rgw_arrow_flight start arrow flight frontend
  --rgw_compression specify the rgw compression plugin
  --seastore use seastore as crimson osd backend
  -b, --bluestore use bluestore as the osd objectstore backend (default)
  -K, --kstore use kstore as the osd objectstore backend
  --cyanstore use cyanstore as the osd objectstore backend
  --memstore use memstore as the osd objectstore backend
  --cache <pool>: enable cache tiering on pool
  --short: short object names only; necessary for ext4 dev
  --nolockdep disable lockdep
  --multimds <count> allow multimds with maximum active count
  --without-dashboard: do not run using mgr dashboard
  --bluestore-spdk: enable SPDK, given a comma-delimited list of PCI-IDs of NVMe devices (e.g., 0000:81:00.0)
  --bluestore-pmem: enable PMEM, given the path to a file mapped to PMEM
  --msgr1: use msgr1 only
  --msgr2: use msgr2 only
  --msgr21: use msgr2 and msgr1
  --crimson: use crimson-osd instead of ceph-osd
  --crimson-foreground: use crimson-osd, but run it in the foreground
  --osd-args: specify any extra osd specific options
  --bluestore-devs: comma-separated list of blockdevs to use for bluestore
  --bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)
  --bluestore-io-uring: enable io_uring backend
  --inc-osd: append some more osds into the existing vcluster
  --cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]
  --no-parallel: don't start all OSDs in parallel
  --no-restart: don't restart the process when using ceph-run
  --jaeger: use jaegertracing for tracing
  --seastore-device-size: set total size of seastore
  --seastore-devs: comma-separated list of blockdevs to use for seastore
  --seastore-secondary-devs: comma-separated list of secondary blockdevs to use for seastore
  --seastore-secondary-devs-type: device type of all secondary blockdevs. HDD, SSD (default), ZNS or RANDOM_BLOCK_SSD
  --crimson-smp: number of cores to use for crimson
    IFS=',' read -r -a block_devs <<< "$devs"
    for dev in "${block_devs[@]}"; do
        if [ ! -b $dev ] || [ ! -w $dev ]; then
            echo "All $opt_name must refer to writable block devices"
parse_secondary_devs() {
    IFS=',' read -r -a secondary_block_devs <<< "$devs"
    for dev in "${secondary_block_devs[@]}"; do
        if [ ! -b $dev ] || [ ! -w $dev ]; then
            echo "All $opt_name must refer to writable block devices"
while [ $# -ge 1 ]; do

        [ -z "$2" ] && usage_exit

        if [ "$inc_osd_num" == "" ]; then

    --crimson-foreground)

        [ -z "$2" ] && usage_exit

        [ -z "$2" ] && usage_exit

        [ -z "$2" ] && usage_exit

        [ -z "$2" ] && usage_exit

        [ -z "$2" ] && usage_exit

        [ -z "$2" ] && usage_exit

        rgw_flight_frontend="yes"

        [ -z "$2" ] && usage_exit
        cephx=1 # this is on by default; the flag exists for historical consistency
        if [ ! -r $conf_fn ]; then
            echo "cannot use old configuration: $conf_fn not readable." >&2
        objectstore="memstore"

        objectstore="cyanstore"

        objectstore="seastore"

        objectstore="bluestore"

        hitset="$hitset $2 $3"

        extra_conf+=$'\n'"$2"

        if [ -z "$cache" ]; then
        with_mgr_dashboard=false

        with_mgr_restful=true

    --seastore-device-size)

        parse_block_devs --seastore-devs "$2"

    --seastore-secondary-devs)
        parse_secondary_devs --seastore-secondary-devs "$2"

    --seastore-secondary-devs-type)
        secondary_block_devs_type="$2"
        [ -z "$2" ] && usage_exit
        IFS=',' read -r -a bluestore_spdk_dev <<< "$2"

        [ -z "$2" ] && usage_exit
        bluestore_pmem_file="$2"

        parse_block_devs --bluestore-devs "$2"
    --bluestore-io-uring)

echo "with_jaeger $with_jaeger"

if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
562 if [ "$new" -eq 0 ]; then
563 if [ -z "$CEPH_ASOK_DIR" ]; then
564 CEPH_ASOK_DIR
=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
566 mkdir
-p $CEPH_ASOK_DIR
567 MON
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
569 OSD
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
571 MDS
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
573 MGR
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
575 RGW
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
577 NFS
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
578 GANESHA_DAEMON_NUM
="$NFS"
if [ -e "$conf_fn" ]; then
    asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
    if [ $asok_dir != /var/run/ceph ]; then
        [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir

if [ -z "$CEPH_ASOK_DIR" ]; then
    CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
    eval "valg=\$valgrind_$type"
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f

    if [ "$nodaemon" -eq 0 ]; then

        if [ "$restart" -eq 0 ]; then
            set -- '--no-restart' "$@"
        fi
        if [ "$redirect" -eq 0 ]; then
            prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
        else
            ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
623 if [ "$new" -eq 1 -o "$overwrite_conf" -eq 1 ]; then
631 if [ $CEPH_NUM_RGW -eq 0 ]; then
635 # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
636 # individual rgw's ids will be their ports.
637 current_port
=$CEPH_RGW_PORT
638 # allow only first rgw to start arrow_flight server/port
639 local flight_conf
=$rgw_flight_frontend
640 for n
in $
(seq 1 $CEPH_NUM_RGW); do
642 [client.rgw.${current_port}]
643 rgw frontends = $rgw_frontend port=${current_port}${flight_conf:+,arrow_flight}
644 admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
645 debug rgw_flight = 20
647 current_port
=$
((current_port
+ 1))
    while read -r opt; do
        if [ -z "$formatted" ]; then

            formatted+=$'\n'${indent}${opt}
        log file = $CEPH_OUT_DIR/\$name.log
        admin socket = $CEPH_ASOK_DIR/\$name.asok

        pid file = $CEPH_OUT_DIR/\$name.pid
        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
    local mgr_modules="iostat nfs"
    if $with_mgr_dashboard; then
        mgr_modules+=" dashboard"
    fi
    if $with_mgr_restful; then
        mgr_modules+=" restful"
    fi
    if [ $msgr -eq 21 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = true"
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = false"
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="ms bind msgr2 = false
                   ms bind msgr1 = true"
    fi
; generated by vstart.sh on `date`

        num mon = $CEPH_NUM_MON
        num osd = $CEPH_NUM_OSD
        num mds = $CEPH_NUM_MDS
        num mgr = $CEPH_NUM_MGR
        num rgw = $CEPH_NUM_RGW
        num ganesha = $GANESHA_DAEMON_NUM

        osd failsafe full ratio = .99
        mon osd full ratio = .99
        mon osd nearfull ratio = .99
        mon osd backfillfull ratio = .99
        mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
        erasure code dir = $EC_PATH
        plugin dir = $CEPH_LIB
        run dir = $CEPH_OUT_DIR
        crash dir = $CEPH_OUT_DIR
        enable experimental unrecoverable data corrupting features = *
        osd_crush_chooseleaf_type = 0
        debug asok assert abort = true
        $(format_conf "${msgr_conf}")
        $(format_conf "${extra_conf}")
    if [ "$with_jaeger" -eq 1 ] ; then

        jaeger_agent_port = 6831

    if [ "$lockdep" -eq 1 ] ; then
737 if [ "$cephx" -eq 1 ] ; then
739 auth cluster required = cephx
740 auth service required = cephx
741 auth client required = cephx
743 elif [ "$gssapi_authx" -eq 1 ] ; then
745 auth cluster required = gss
746 auth service required = gss
747 auth client required = gss
748 gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
752 auth cluster required = none
753 auth service required = none
754 auth client required = none
755 ms mon client mode = crc
758 if [ "$short" -eq 1 ]; then
759 COSDSHORT
=" osd max object name len = 460
760 osd max object namespace len = 64"
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ] || [ "$pmem_enabled" -eq 1 ]; then
            BLUESTORE_OPTS=" bluestore_block_db_path = \"\"
        bluestore_block_db_size = 0
        bluestore_block_db_create = false
        bluestore_block_wal_path = \"\"
        bluestore_block_wal_size = 0
        bluestore_block_wal_create = false
        bluestore_spdk_mem = 2048"
        else
            BLUESTORE_OPTS=" bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
        bluestore block db size = 1073741824
        bluestore block db create = true
        bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
        bluestore block wal size = 1048576000
        bluestore block wal create = true"
        fi
        if [ "$zoned_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bluestore min alloc size = 65536
        bluestore prefer deferred size = 0
        bluestore prefer deferred size hdd = 0
        bluestore prefer deferred size ssd = 0
        bluestore allocator = zoned"
        fi
        if [ "$io_uring_enabled" -eq 1 ]; then

    if [ "$objectstore" == "seastore" ]; then
        if [[ ${seastore_size+x} ]]; then
            SEASTORE_OPTS="
        seastore device size = $seastore_size"
        keyring = $keyring_fn
        log file = $CEPH_OUT_CLIENT_DIR/\$name.\$pid.log
        admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok

        rgw crypt s3 kms backend = testing
        rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        rgw crypt require ssl = false
        ; uncomment the following to set LC days as the value in seconds;
        ; needed for passing lc time based s3-tests (can be verbose)
        ; rgw lc debug interval = 10
        $(format_conf "${extra_conf}")
        mds data = $CEPH_DEV_DIR/mds.\$id
        mds root ino uid = `id -u`
        mds root ino gid = `id -g`
        $(format_conf "${extra_conf}")

        mgr disabled modules = rook
        mgr data = $CEPH_DEV_DIR/mgr.\$id
        mgr module path = $MGR_PYTHON_PATH
        cephadm path = $CEPH_BIN/cephadm
        $(format_conf "${extra_conf}")

        osd_check_max_object_name_len_on_startup = false
        osd data = $CEPH_DEV_DIR/osd\$id
        osd journal = $CEPH_DEV_DIR/osd\$id/journal
        osd journal size = 100
        osd class dir = $OBJCLASS_PATH
        osd class load list = *
        osd class default list = *
        osd fast shutdown = false

        bluestore fsck on mount = true
        bluestore block create = true

        kstore fsck on mount = true
        osd objectstore = $objectstore
        $(format_conf "${extra_conf}")

        mon_data_avail_crit = 1
        mgr initial modules = $mgr_modules
        $(format_conf "${extra_conf}")
        mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
        auth allow insecure global id reclaim = false
865 if [ "$crimson" -eq 1 ]; then
867 osd pool default crimson = true
write_logrotate_conf() {
    out_dir=$(pwd)"/out/*.log"

    # NOTE: ensuring that the absence of one of the following processes
    # won't abort the logrotate command.
    killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
logrotate_conf_path=$(pwd)"/logrotate.conf"
logrotate_state_path=$(pwd)"/logrotate.state"

if ! test -a $logrotate_conf_path; then
    if test -a $logrotate_state_path; then
        rm -f $logrotate_state_path
    fi
    write_logrotate_conf > $logrotate_conf_path
for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
do
    [ $count -eq $CEPH_NUM_MON ] && break;
    count=$(($count + 1))
    if [ -z "$MONS" ]; then
920 if [ "$new" -eq 1 ]; then
921 if [ `echo $IP | grep '^127\\.'` ]; then
923 echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
924 echo " connect. either adjust /etc/hosts, or edit this script to use your"
925 echo " machine's real IP."
    prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
    prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
        --cap mon 'allow *' \
        --cap osd 'allow *' \
        --cap mds 'allow *' \
        --cap mgr 'allow *' \

    # build a fresh fs monmap, mon fs
        if [ $msgr -eq 1 ]; then
            A="v1:$IP:$(($CEPH_PORT+$count+1))"
        fi
        if [ $msgr -eq 2 ]; then
            A="v2:$IP:$(($CEPH_PORT+$count+1))"
        fi
        if [ $msgr -eq 21 ]; then
            A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
        fi
        params+=("--addv" "$f" "$A")
        mon_host="$mon_host $A"

        mon data = $CEPH_DEV_DIR/mon.$f

        count=$(($count + 2))
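
        # Worked example of the address math above (illustrative, with
        # CEPH_PORT=40000 and --msgr21): mon.a gets
        # [v2:$IP:40000,v1:$IP:40001]; count then advances by 2, so mon.b
        # gets [v2:$IP:40002,v1:$IP:40003].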
    prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"

        prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
        prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
        prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"

    prun rm -- "$monmap_fn"
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS

    if [ "$crimson" -eq 1 ]; then
        $CEPH_BIN/ceph osd set-allow-crimson --yes-i-really-mean-it
    if [ $inc_osd_num -gt 0 ]; then
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf

        end=$(($CEPH_NUM_OSD-1))

    for osd in `seq $start $end`
        local extra_seastar_args
        if [ "$ceph_osd" == "crimson-osd" ]; then
            bottom_cpu=$(( osd * crimson_smp ))
            top_cpu=$(( bottom_cpu + crimson_smp - 1 ))
            # pin each osd to its own contiguous range of CPU cores
            extra_seastar_args="--cpuset $bottom_cpu-$top_cpu"
            if [ "$debug" -ne 0 ]; then
                extra_seastar_args+=" --debug"
            fi
            if [ "$trace" -ne 0 ]; then
                extra_seastar_args+=" --trace"
            fi
        if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then

            if [ "$spdk_enabled" -eq 1 ]; then

        bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}

            elif [ "$pmem_enabled" -eq 1 ]; then

        bluestore_block_path = ${bluestore_pmem_file}
            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$kstore_path" ]; then
                ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
            fi
            if [ -n "${block_devs[$osd]}" ]; then
                dd if=/dev/zero of=${block_devs[$osd]} bs=1M count=1
                ln -s ${block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block
            fi
            if [ -n "${secondary_block_devs[$osd]}" ]; then
                dd if=/dev/zero of=${secondary_block_devs[$osd]} bs=1M count=1
                mkdir -p $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1
                ln -s ${secondary_block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1/block
            if [ "$objectstore" == "bluestore" ]; then

        bluestore fsck on mount = false

            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            prun $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args \
                2>&1 | tee $CEPH_OUT_DIR/osd-mkfs.$osd.log

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
        echo 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &

    for p in $osds_wait; do

    debug echo OSDs started
    if [ $inc_osd_num -gt 0 ]; then

        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
create_mgr_restful_secret() {
    while ! ceph_adm -h | grep -c -q ^restful; do
        debug echo 'waiting for mgr restful module to start'
    if ceph_adm restful create-self-signed-cert > /dev/null; then
        secret_file=`mktemp`
        ceph_adm restful create-key admin -o $secret_file
        RESTFUL_SECRET=`cat $secret_file`

    else
        debug echo 'MGR Restful is not working, perhaps the package is not installed?'
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'
        if $with_mgr_dashboard ; then
            local port_option="ssl_server_port"
            local http_proto="https"
            if [ "$ssl" == "0" ]; then
                port_option="server_port"
                http_proto="http"
                ceph_adm config set mgr mgr/dashboard/ssl false --force
            fi
            ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                DASH_URLS="$http_proto://$IP:$MGR_PORT"
            else
                DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
            fi
        fi
        MGR_PORT=$(($MGR_PORT + 1000))
        ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
        PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

        ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
        if [ $mgr -eq 1 ]; then
            RESTFUL_URLS="https://$IP:$MGR_PORT"
        else
            RESTFUL_URLS+=", https://$IP:$MGR_PORT"
        fi
        MGR_PORT=$(($MGR_PORT + 1000))
        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS

    while ! ceph_adm mgr stat | jq -e '.available'; do
        debug echo 'waiting for mgr to become available'
    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            while ! ceph_adm -h | grep -c -q ^dashboard; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
            printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
            ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
                administrator --force-password
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert; then
                    debug echo dashboard module not working correctly!
        if $with_mgr_restful; then
            create_mgr_restful_secret
1191 if [ "$cephadm" -eq 1 ]; then
1192 debug
echo Enabling cephadm orchestrator
1193 if [ "$new" -eq 1 ]; then
1195 https
://hub.docker.com
/v
2/repositories
/ceph
/daemon-base
/tags
/latest-master-devel \
1196 | jq
-r '.images[0].digest')
1197 ceph_adm config
set global container_image
"docker.io/ceph/daemon-base@$digest"
1199 ceph_adm config-key
set mgr
/cephadm
/ssh_identity_key
-i ~
/.ssh
/id_rsa
1200 ceph_adm config-key
set mgr
/cephadm
/ssh_identity_pub
-i ~
/.ssh
/id_rsa.pub
1201 ceph_adm mgr module
enable cephadm
1202 ceph_adm orch
set backend cephadm
1203 ceph_adm orch
host add
"$(hostname)"
1204 ceph_adm orch apply crash
'*'
1205 ceph_adm config
set mgr mgr
/cephadm
/allow_ptrace true
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            if [ "$standby" -eq 1 ]; then
                mkdir -p $CEPH_DEV_DIR/mds.${name}s

        mds standby for rank = $mds

        mds standby replay = true
        mds standby for name = ${name}
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'

        run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
if [ $new -eq 1 ]; then
    if [ "$CEPH_NUM_FS" -gt "0" ] ; then
        sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
        if [ "$CEPH_NUM_FS" -gt "1" ] ; then
            ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
        fi

        # wait for volume module to load
        while ! ceph_adm fs volume ls ; do sleep 1 ; done
        for name in a b c d e f g h i j k l m n o p
        do
            ceph_adm fs volume create ${name}
            ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
            fs=$(($fs + 1))
            [ $fs -eq $CEPH_NUM_FS ] && break
# Ganesha daemons require the nfs-ganesha, nfs-ganesha-ceph, nfs-ganesha-rados-grace and
# nfs-ganesha-rados-urls (version 3.3 and above) packages to be installed. On
# Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8
# the packages are available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha
    GANESHA_PORT=$(($CEPH_PORT + 4000))

    test_user="$cluster_id"

    namespace=$cluster_id
    url="rados://$pool_name/$namespace/conf-nfs.$test_user"

    prun ceph_adm auth get-or-create client.$test_user \
        osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
        mds "allow rw path=/" \
        ceph_adm mgr module enable test_orchestrator
        ceph_adm orch set backend test_orchestrator
        ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
        prun ceph_adm nfs cluster create $cluster_id
        prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"
    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break

        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir
1313 echo "NFS_CORE_PARAM {
1315 Enable_RQUOTA = false;
1325 RecoveryBackend = rados_cluster;
1326 Minor_Versions = 1, 2;
1330 pool = '$pool_name';
1331 namespace = $namespace;
1332 UserId = $test_user;
1337 Userid = $test_user;
1341 %url $url" > "$ganesha_dir/ganesha-$name.conf"
        ganesha data = $ganesha_dir
        pid file = $CEPH_OUT_DIR/ganesha-$name.pid

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG
        # Wait a few seconds for the grace period to be removed
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        echo "$test_user ganesha daemon $name started on port: $port"
1365 if [ "$debug" -eq 0 ]; then
1372 debug
echo "** going verbose **"
# Crimson doesn't support PG merge/split yet.
if [ "$ceph_osd" == "crimson-osd" ]; then
    extra_conf+='
        osd_pool_default_pg_autoscale_mode = off'
if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
if [ -z "$CEPH_PORT" ]; then
    while [ true ]
    do
        CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
        ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"
if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
[ -d $CEPH_OUT_CLIENT_DIR ] || mkdir -p $CEPH_OUT_CLIENT_DIR
if [ $inc_osd_num -eq 0 ]; then
    $SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*

[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn
# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then

echo hostname $HOSTNAME
if [ -x "$(which ip 2>/dev/null)" ]; then
    # pick the first IPv4 address, filtering out localhost
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing is left, try the localhost address; it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
1445 echo "port $CEPH_PORT"
1448 [ -z $CEPH_ADM ] && CEPH_ADM
=$CEPH_BIN/ceph
    if [ "$cephx" -eq 1 ]; then
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
    else
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
if [ $inc_osd_num -gt 0 ]; then

if [ "$new" -eq 1 ]; then

if [ $CEPH_NUM_MON -gt 0 ]; then

    debug echo Populating config ...
    cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1

mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
mon_allow_pool_size_one = true

osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288

mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true

mgr/telemetry/nag = false
mgr/telemetry/enable = false
1500 if [ "$debug" -ne 0 ]; then
1501 debug
echo Setting debug configs ...
1502 cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
1516 debug_bluestore = 20
1528 mds_debug_scatterstat = true
1529 mds_verify_scatter = true
1532 if [ "$cephadm" -gt 0 ]; then
1533 debug
echo Setting mon public_network ...
1534 public_network
=$
(ip route list |
grep -w "$IP" |
awk '{print $1}')
1535 ceph_adm config
set mon public_network
$public_network
1539 if [ "$ceph_osd" == "crimson-osd" ]; then
1540 $CEPH_BIN/ceph
-c $conf_fn config
set osd crimson_seastar_smp
$crimson_smp
1543 if [ $CEPH_NUM_MGR -gt 0 ]; then
1548 if [ $CEPH_NUM_OSD -gt 0 ]; then
1553 if [ "$smallmds" -eq 1 ]; then
1556 mds log max segments = 2
1557 # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
1558 mds cache memory limit = 100M
if [ $CEPH_NUM_MDS -gt 0 ]; then
    # key with access to all FS
    ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"

    # Don't set max_mds until all the daemons are started, otherwise
    # the intended standbys might end up in active roles.
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
    for name in a b c d e f g h i j k l m n o p
    do
        [ $fs -eq $CEPH_NUM_FS ] && break
        fs=$(($fs + 1))
        if [ "$CEPH_MAX_MDS" -gt 1 ]; then
            ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
1585 if [ "$ec" -eq 1 ]; then
1587 osd erasure-code-profile set ec-profile m=2 k=2
1588 osd pool create ec erasure ec-profile
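    # With k=2, m=2 the profile splits each object into 2 data chunks plus
    # 2 coding chunks, so the pool can tolerate the loss of any 2 OSDs.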
while [ -n "$*" ]; do

    debug echo "creating cache for pool $p ..."

    osd pool create ${p}-cache
    osd tier add $p ${p}-cache
    osd tier cache-mode ${p}-cache writeback
    osd tier set-overlay $p ${p}-cache
while [ -n "$*" ]; do

    debug echo "setting hit_set on pool $pool type $type ..."

    osd pool set $pool hit_set_type $type
    osd pool set $pool hit_set_count 8
    osd pool set $pool hit_set_period 30
do_rgw_create_bucket()
{
    local rgw_python_file='rgw-create-bucket.py'
    echo "import boto.s3.connection

conn = boto.connect_s3(
    aws_access_key_id = '$s3_akey',
    aws_secret_access_key = '$s3_skey',

    calling_format = boto.s3.connection.OrdinaryCallingFormat(),
    )

bucket = conn.create_bucket('nfs-bucket')
print('created new bucket')" > "$CEPH_OUT_DIR/$rgw_python_file"
    prun python $CEPH_OUT_DIR/$rgw_python_file
do_rgw_create_users()
{
    s3_akey='0555b35654ad1656d804'
    s3_skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $s3_akey --secret $s3_skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null
    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com --caps="user-policy=*" -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null
1680 echo "S3 User Info:"
1681 echo " access key: $s3_akey"
1682 echo " secret key: $s3_skey"
1684 echo "Swift User Info:"
1685 echo " account : test"
1686 echo " user : tester"
1687 echo " password : testing"
1693 if [ "$new" -eq 1 ]; then
1695 if [ -n "$rgw_compression" ]; then
1696 debug
echo "setting compression type=$rgw_compression"
1697 $CEPH_BIN/radosgw-admin zone placement modify
-c $conf_fn --rgw-zone=default
--placement-id=default-placement
--compression=$rgw_compression > /dev
/null
    if [ -n "$rgw_flight_frontend" ]; then
        debug echo "starting arrow_flight frontend on first rgw"

    if [ "$cephadm" -gt 0 ]; then
        ceph_adm orch apply rgw rgwTest

    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo
    current_port=$CEPH_RGW_PORT
    # allow only the first rgw to start the arrow_flight server/port
    local flight_conf=$rgw_flight_frontend
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"

        ceph_adm auth get-or-create $rgw_name \
        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            --rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}${flight_conf:+,arrow_flight}"

        [ $i -eq $CEPH_NUM_RGW ] && break

        current_port=$((current_port + 1))
if [ "$CEPH_NUM_RGW" -gt 0 ]; then

if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    pseudo_path="/cephfs"
    if [ "$cephadm" -gt 0 ]; then
        prun ceph_adm nfs cluster create $cluster_id
        if [ $CEPH_NUM_MDS -gt 0 ]; then
            prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
        if [ "$CEPH_NUM_RGW" -gt 0 ]; then
            pseudo_path="/rgw"
            do_rgw_create_bucket
            prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi

        echo "Mount using: mount -t nfs -o port=<ganesha-port-num> $IP:$pseudo_path mountpoint"
    if command -v podman > /dev/null; then
        service="podman"
    elif pgrep -f docker > /dev/null; then
        service="docker"
    fi
    if [ -n "$service" ]; then
        echo "using $service for deploying jaeger..."
        # check for exited containers, remove them and restart the container
        if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then

        if [ ! "$(podman ps -aq -f name=jaeger)" ]; then

        echo "cannot find docker or podman, please restart service and rerun."
if [ $with_jaeger -eq 1 ]; then
    debug echo "Enabling jaegertracing..."
    docker_service run -d --name jaeger \

        quay.io/jaegertracing/all-in-one
debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."
1821 if [ "$new" -eq 1 ]; then
1822 if $with_mgr_dashboard; then
1824 dashboard urls: $DASH_URLS
1825 w/ user/pass: admin / admin
1828 if $with_mgr_restful; then
1830 restful urls: $RESTFUL_URLS
1831 w/ user/pass: admin / $RESTFUL_SECRET
# add header to the environment file
{
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
} > $CEPH_DIR/vstart_environment.sh
1846 echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
1847 echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
1848 echo "export PATH=$CEPH_DIR/bin:\$PATH"
1850 if [ "$CEPH_DIR" != "$PWD" ]; then
1851 echo "export CEPH_CONF=$conf_fn"
1852 echo "export CEPH_KEYRING=$keyring_fn"
1855 if [ -n "$CEPHFS_SHELL" ]; then
1856 echo "alias cephfs-shell=$CEPHFS_SHELL"
1858 } |
tee -a $CEPH_DIR/vstart_environment.sh
# always keep this section at the very bottom of this file
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then

    echo "  Please remove stray $STRAY_CONF_PATH if not needed."
    echo "  Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "  and may lead to undesired results."

    echo "  Remember to restart cluster after removing $STRAY_CONF_PATH"