# When VSTART_DEST is set, redirect all generated state (ceph.conf, keyring,
# dev/ and out/ directories) under $VSTART_DEST instead of the current dir.
if [ -n "$VSTART_DEST" ]; then
    # Canonicalize SRC_PATH to an absolute path (empty if the cd fails).
    SRC_PATH=$(cd "$SRC_PATH" && pwd)
    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
fi
# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build: learn the source location from CMakeCache.txt.
    CEPH_ROOT=$(grep ceph_SOURCE_DIR CMakeCache.txt | cut -d "=" -f 2)
    # Default the mgr python module path into the source tree.
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi
# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
elif [ -n "$CEPH_ROOT" ]; then
    # Running out of a build tree: binaries and libraries live in
    # $CEPH_BUILD_DIR, python bindings in the source tree.
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BUILD_DIR/bin/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    # Object classes and erasure-code plugins sit directly in the lib dir
    # of a build tree (no erasure-code/ or rados-classes/ subdirectory).
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
fi
43 if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
# Fall back to the in-tree python bindings if nothing set one earlier.
[ -z "$PYBIND" ] && PYBIND=./pybind

# Make the freshly built python bindings and shared libraries visible to
# the daemons and tools launched below.
export PYTHONPATH=$PYBIND:$CEPH_LIB/cython_modules/lib.2:$PYTHONPATH
export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
# DYLD_LIBRARY_PATH is the macOS equivalent of LD_LIBRARY_PATH.
export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
52 # Suppress logging for regular use that indicated that we are using a
53 # development version. vstart.sh is only used during testing and
# Accept the short env-var aliases (MON, OSD, ...) for the CEPH_NUM_* knobs;
# an explicitly set CEPH_NUM_* always wins.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
64 # if none of the CEPH_NUM_* number is specified, kill the existing
66 if [ -z "$CEPH_NUM_MON" -a \
67 -z "$CEPH_NUM_OSD" -a \
68 -z "$CEPH_NUM_MDS" -a \
69 -z "$CEPH_NUM_MGR" ]; then
# Final fallback daemon counts for anything still unset.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
# Default working locations: everything lives under the current directory
# unless overridden (e.g. by the VSTART_DEST block above).
[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
# Replication factor: never default above 3, but with 3 or fewer OSDs the
# pool size cannot exceed the OSD count.
# ${CEPH_NUM_OSD:-0} keeps the numeric test from erroring if unset/empty.
if [ "${CEPH_NUM_OSD:-0}" -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
# Tunable defaults; some are overridden by the option-parsing loop below.
cephx=1 #turn cephx on by default
rgw_frontend="civetweb"
lockdep=${LOCKDEP:-1}

# Section name used to read per-run settings back out of ceph.conf.
VSTART_SEC="client.vstart.sh"

# Config/keyring locations, plus scratch map files made unique per run
# via the shell pid ($$).
conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
# Help text. The \n and \t sequences are stored literally here and expanded
# by the caller when printed (presumably via echo -e / printf -- TODO confirm
# against usage_exit in the full file).
usage="usage: $0 [option]... \nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
usage+="options:\n"
usage+="\t-d, --debug\n"
usage+="\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage+="\t-l, --localhost: use localhost instead of hostname\n"
usage+="\t-i <ip>: bind to specific ip\n"
usage+="\t-n, --new\n"
usage+="\t-N, --not-new: reuse existing cluster config (default)\n"
usage+="\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage+="\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage+="\t--smallmds: limit mds cache size\n"
usage+="\t-m ip:port\t\tspecify monitor address\n"
usage+="\t-k keep old configuration files\n"
usage+="\t-x enable cephx (on by default)\n"
usage+="\t-X disable cephx\n"
usage+="\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage+="\t-e : create an erasure pool\n"
usage+="\t-o config\t\t add extra config parameters to all sections\n"
usage+="\t--mon_num specify ceph monitor count\n"
usage+="\t--osd_num specify ceph osd count\n"
usage+="\t--mds_num specify ceph mds count\n"
usage+="\t--rgw_num specify ceph rgw count\n"
usage+="\t--mgr_num specify ceph mgr count\n"
usage+="\t--rgw_port specify ceph rgw http listen port\n"
usage+="\t--rgw_frontend specify the rgw frontend configuration\n"
usage+="\t-b, --bluestore use bluestore as the osd objectstore backend\n"
usage+="\t--memstore use memstore as the osd objectstore backend\n"
usage+="\t--cache <pool>: enable cache tiering on pool\n"
usage+="\t--short: short object names only; necessary for ext4 dev\n"
usage+="\t--nolockdep disable lockdep\n"
usage+="\t--multimds <count> allow multimds with maximum active count\n"
163 while [ $# -ge 1 ]; do
175 [ -z "$2" ] && usage_exit
192 [ -z "$2" ] && usage_exit
201 [ -z "$2" ] && usage_exit
206 [ -z "$2" ] && usage_exit
211 [ -z "$2" ] && usage_exit
216 [ -z "$2" ] && usage_exit
221 [ -z "$2" ] && usage_exit
265 [ -z "$2" ] && usage_exit
270 cephx
=1 # this is on be default, flag exists for historical consistency
276 if [ ! -r $conf_fn ]; then
277 echo "cannot use old configuration: $conf_fn not readable." >&2
289 hitset
="$hitset $2 $3"
294 extra_conf
="$extra_conf $2
299 if [ -z "$cache" ]; then
319 if [ $kill_all -eq 1 ]; then
320 $SUDO $INIT_CEPH stop
323 if [ "$overwrite_conf" -eq 0 ]; then
324 MON
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mon 2>/dev/null` && \
326 OSD
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_osd 2>/dev/null` && \
328 MDS
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mds 2>/dev/null` && \
330 MGR
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mgr 2>/dev/null` && \
332 RGW
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_rgw 2>/dev/null` && \
335 if [ "$new" -ne 0 ]; then
337 [ -e "$conf_fn" ] && rm -- "$conf_fn"
339 # -k is implied... (doesn't make sense otherwise)
359 eval "valg=\$valgrind_$type"
360 [ -z "$valg" ] && valg
="$valgrind"
362 if [ -n "$valg" ]; then
363 prunb valgrind
--tool="$valg" $valgrind_args "$@" -f
366 if [ "$nodaemon" -eq 0 ]; then
369 prunb .
/ceph-run
"$@" -f
375 if [ "$overwrite_conf" -eq 1 ]; then
382 log file = $CEPH_OUT_DIR/\$name.log
383 admin socket = $CEPH_OUT_DIR/\$name.asok
385 pid file = $CEPH_OUT_DIR/\$name.pid
386 heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
390 ; generated by vstart.sh on `date`
392 num mon = $CEPH_NUM_MON
393 num osd = $CEPH_NUM_OSD
394 num mds = $CEPH_NUM_MDS
395 num mgr = $CEPH_NUM_MGR
396 num rgw = $CEPH_NUM_RGW
401 osd pgp bits = 5 ; (invalid, but ceph should cope!)
402 osd pool default size = $OSD_POOL_DEFAULT_SIZE
403 osd crush chooseleaf type = 0
404 osd pool default min size = 1
405 osd failsafe full ratio = .99
406 mon osd reporter subtree level = osd
407 mon osd full ratio = .99
408 mon data avail warn = 10
409 mon data avail crit = 1
410 erasure code dir = $EC_PATH
411 plugin dir = $CEPH_LIB
412 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
413 rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
415 rgw enable static website = 1
416 rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
417 rgw crypt require ssl = false
418 rgw lc debug interval = 10
419 filestore fd cache size = 32
420 run dir = $CEPH_OUT_DIR
421 enable experimental unrecoverable data corrupting features = *
423 if [ "$lockdep" -eq 1 ] ; then
428 if [ "$cephx" -eq 1 ] ; then
430 auth cluster required = cephx
431 auth service required = cephx
432 auth client required = cephx
436 auth cluster required = none
437 auth service required = none
438 auth client required = none
441 if [ "$short" -eq 1 ]; then
442 COSDSHORT
=" osd max object name len = 460
443 osd max object namespace len = 64"
447 keyring = $keyring_fn
448 log file = $CEPH_OUT_DIR/\$name.\$pid.log
449 admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok
454 mds debug frag = true
455 mds debug auth pins = true
456 mds debug subtrees = true
457 mds data = $CEPH_DEV_DIR/mds.\$id
458 mds root ino uid = `id -u`
459 mds root ino gid = `id -g`
462 mgr modules = restful fsstatus dashboard
463 mgr data = $CEPH_DEV_DIR/mgr.\$id
464 mgr module path = $MGR_PYTHON_PATH
465 mon reweight min pgs per osd = 4
466 mon pg warn min per osd = 3
472 osd_check_max_object_name_len_on_startup = false
473 osd data = $CEPH_DEV_DIR/osd\$id
474 osd journal = $CEPH_DEV_DIR/osd\$id/journal
475 osd journal size = 100
477 osd class dir = $OBJCLASS_PATH
478 osd class load list = *
479 osd class default list = *
480 osd scrub load threshold = 2000.0
481 osd debug op order = true
482 osd debug misdirected ops = true
483 filestore wbthrottle xfs ios start flusher = 10
484 filestore wbthrottle xfs ios hard limit = 20
485 filestore wbthrottle xfs inodes hard limit = 30
486 filestore wbthrottle btrfs ios start flusher = 10
487 filestore wbthrottle btrfs ios hard limit = 20
488 filestore wbthrottle btrfs inodes hard limit = 30
489 osd copyfrom max chunk = 524288
490 bluestore fsck on mount = true
491 bluestore block create = true
492 bluestore block db size = 67108864
493 bluestore block db create = true
494 bluestore block wal size = 1048576000
495 bluestore block wal create = true
501 mon pg warn min per osd = 3
502 mon osd allow primary affinity = true
503 mon osd allow pg upmap = true
504 mon reweight min pgs per osd = 4
505 mon osd prime pg temp = true
506 crushtool = $CEPH_BIN/crushtool
507 mon allow pool delete = true
511 mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
520 for f
in a b c d e f g h i j k l m n o p q r s t u v w x y z
522 [ $count -eq $CEPH_NUM_MON ] && break;
523 count
=$
(($count + 1))
532 if [ "$new" -eq 1 ]; then
533 if [ `echo $IP | grep '^127\\.'` ]
536 echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
537 echo " connect. either adjust /etc/hosts, or edit this script to use your"
538 echo " machine's real IP."
542 prun
$SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon.
"$keyring_fn" --cap mon
'allow *'
543 prun
$SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin
--set-uid=0 \
544 --cap mon
'allow *' \
545 --cap osd
'allow *' \
546 --cap mds
'allow *' \
547 --cap mgr
'allow *' \
550 # build a fresh fs monmap, mon fs
555 str
="$str --add $f $IP:$(($CEPH_PORT+$count))"
559 mon data = $CEPH_DEV_DIR/mon.$f
560 mon addr = $IP:$(($CEPH_PORT+$count))
562 count
=$
(($count + 1))
564 prun
"$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"
568 prun
rm -rf -- "$CEPH_DEV_DIR/mon.$f"
569 prun mkdir
-p "$CEPH_DEV_DIR/mon.$f"
570 prun
"$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
573 prun
rm -- "$monmap_fn"
579 run
'mon' $CEPH_BIN/ceph-mon
-i $f $ARGS $CMON_ARGS
584 for osd
in `seq 0 $((CEPH_NUM_OSD-1))`
586 if [ "$new" -eq 1 ]; then
592 rm -rf $CEPH_DEV_DIR/osd
$osd || true
593 if command -v btrfs
> /dev
/null
; then
594 for f
in $CEPH_DEV_DIR/osd
$osd/*; do btrfs sub delete
$f &> /dev
/null || true
; done
596 if [ -n "$filestore_path" ]; then
597 ln -s $filestore_path $CEPH_DEV_DIR/osd
$osd
599 mkdir
-p $CEPH_DEV_DIR/osd
$osd
603 echo "add osd$osd $uuid"
604 ceph_adm osd create
$uuid
605 ceph_adm osd crush add osd.
$osd 1.0 host=$HOSTNAME root
=default
606 $SUDO $CEPH_BIN/ceph-osd
-i $osd $ARGS --mkfs --mkkey --osd-uuid $uuid
608 local key_fn
=$CEPH_DEV_DIR/osd
$osd/keyring
609 echo adding osd
$osd key to auth repository
610 ceph_adm
-i "$key_fn" auth add osd.
$osd osd
"allow *" mon
"allow profile osd" mgr
"allow profile osd"
613 run
'osd' $SUDO $CEPH_BIN/ceph-osd
-i $osd $ARGS $COSD_ARGS
619 # avoid monitors on nearby ports (which test/*.sh use extensively)
620 MGR_PORT
=$
(($CEPH_PORT + 1000))
621 for name
in x y z a b c d e f g h i j k l m n o p
623 [ $mgr -eq $CEPH_NUM_MGR ] && break
625 if [ "$new" -eq 1 ]; then
626 mkdir
-p $CEPH_DEV_DIR/mgr.
$name
627 key_fn
=$CEPH_DEV_DIR/mgr.
$name/keyring
628 $SUDO $CEPH_BIN/ceph-authtool
--create-keyring --gen-key --name=mgr.
$name $key_fn
629 ceph_adm
-i $key_fn auth add mgr.
$name mon
'allow profile mgr' mds
'allow *' osd
'allow *'
637 ceph_adm config-key put mgr
/dashboard
/$name/server_addr
$IP
638 ceph_adm config-key put mgr
/dashboard
/$name/server_port
$MGR_PORT
639 DASH_URLS
+="http://$IP:$MGR_PORT/"
640 MGR_PORT
=$
(($MGR_PORT + 1000))
642 ceph_adm config-key put mgr
/restful
/$name/server_addr
$IP
643 ceph_adm config-key put mgr
/restful
/$name/server_port
$MGR_PORT
645 RESTFUL_URLS
+="https://$IP:$MGR_PORT"
646 MGR_PORT
=$
(($MGR_PORT + 1000))
648 echo "Starting mgr.${name}"
649 run
'mgr' $CEPH_BIN/ceph-mgr
-i $name $ARGS
653 ceph_adm tell mgr restful create-self-signed-cert
654 ceph_adm tell mgr restful create-key admin
-o $SF
655 RESTFUL_SECRET
=`cat $SF`
660 if [ $new -eq 1 ]; then
661 if [ "$CEPH_NUM_FS" -gt "0" ] ; then
662 if [ "$CEPH_NUM_FS" -gt "1" ] ; then
663 ceph_adm fs flag
set enable_multiple true
--yes-i-really-mean-it
667 for name
in a b c d e f g h i j k l m n o p
669 ceph_adm osd pool create
"cephfs_data_${name}" 8
670 ceph_adm osd pool create
"cephfs_metadata_${name}" 8
671 ceph_adm fs new
"cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"
673 [ $fs -eq $CEPH_NUM_FS ] && break
679 for name
in a b c d e f g h i j k l m n o p
681 [ $mds -eq $CEPH_NUM_MDS ] && break
684 if [ "$new" -eq 1 ]; then
685 prun mkdir
-p "$CEPH_DEV_DIR/mds.$name"
686 key_fn
=$CEPH_DEV_DIR/mds.
$name/keyring
691 if [ "$standby" -eq 1 ]; then
692 mkdir
-p $CEPH_DEV_DIR/mds.
${name}s
694 mds standby for rank = $mds
696 mds standby replay = true
697 mds standby for name = ${name}
700 prun
$SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
701 ceph_adm
-i "$key_fn" auth add
"mds.$name" mon
'allow profile mds' osd
'allow *' mds
'allow' mgr
'allow profile mds'
702 if [ "$standby" -eq 1 ]; then
703 prun
$SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
704 "$CEPH_DEV_DIR/mds.${name}s/keyring"
705 ceph_adm
-i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add
"mds.${name}s" \
706 mon
'allow profile mds' osd
'allow *' mds
'allow' mgr
'allow profile mds'
710 run
'mds' $CEPH_BIN/ceph-mds
-i $name $ARGS $CMDS_ARGS
711 if [ "$standby" -eq 1 ]; then
712 run
'mds' $CEPH_BIN/ceph-mds
-i ${name}s
$ARGS $CMDS_ARGS
715 #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
716 #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
717 #ceph_adm mds set max_mds 2
721 if [ "$debug" -eq 0 ]; then
732 echo "** going verbose **"
759 mds debug scatterstat = true
760 mds verify scatter = true
761 mds log max segments = 2'
# When an explicit monitor address was given (-m), pass it to every daemon.
if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m $MON_ADDR"
    COSD_ARGS=" -m $MON_ADDR"
    CMDS_ARGS=" -m $MON_ADDR"
fi
775 if [ "$memstore" -eq 1 ]; then
777 osd objectstore = memstore'
779 if [ "$bluestore" -eq 1 ]; then
781 osd objectstore = bluestore'
784 if [ -z "$CEPH_PORT" ]; then
786 [ -e ".ceph_port" ] && CEPH_PORT
=`cat .ceph_port`
789 [ -z "$INIT_CEPH" ] && INIT_CEPH
=$CEPH_BIN/init-ceph
792 test -d $CEPH_DEV_DIR/osd
0/.
&& test -e $CEPH_DEV_DIR/sudo
&& SUDO
="sudo"
794 prun
$SUDO rm -f core
*
796 test -d $CEPH_OUT_DIR || mkdir
$CEPH_OUT_DIR
797 test -d $CEPH_DEV_DIR || mkdir
$CEPH_DEV_DIR
798 $SUDO rm -rf $CEPH_OUT_DIR/*
799 test -d gmon
&& $SUDO rm -rf gmon
/*
801 [ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && test -e $keyring_fn && rm $keyring_fn
804 # figure machine's ip
805 HOSTNAME
=`hostname -s`
806 if [ -n "$ip" ]; then
809 echo hostname
$HOSTNAME
810 if [ -x "$(which ip 2>/dev/null)" ]; then
815 # filter out IPv6 and localhost addresses
816 IP
="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
817 # if nothing left, try using localhost address, it might work
818 if [ -z "$IP" ]; then IP
="127.0.0.1"; fi
821 echo "port $CEPH_PORT"
824 [ -z $CEPH_ADM ] && CEPH_ADM
=$CEPH_BIN/ceph
827 if [ "$cephx" -eq 1 ]; then
828 prun
$SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
830 prun
$SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
834 if [ "$new" -eq 1 ]; then
838 if [ $CEPH_NUM_MON -gt 0 ]; then
843 if [ $CEPH_NUM_OSD -gt 0 ]; then
848 if [ "$smallmds" -eq 1 ]; then
851 mds log max segments = 2
852 mds cache size = 10000
856 if [ $CEPH_NUM_MDS -gt 0 ]; then
860 # Don't set max_mds until all the daemons are started, otherwise
861 # the intended standbys might end up in active roles.
862 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
863 sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
866 for name
in a b c d e f g h i j k l m n o p
868 [ $fs -eq $CEPH_NUM_FS ] && break
870 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
871 ceph_adm fs
set "cephfs_${name}" allow_multimds true
--yes-i-really-mean-it
872 ceph_adm fs
set "cephfs_${name}" max_mds
"$CEPH_MAX_MDS"
878 if [ $CEPH_NUM_MGR -gt 0 ]; then
882 if [ "$ec" -eq 1 ]; then
884 osd erasure-code-profile set ec-profile m=2 k=2
885 osd pool create ec 8 8 erasure ec-profile
890 while [ -n "$*" ]; do
893 echo "creating cache for pool $p ..."
895 osd pool create ${p}-cache 8
896 osd tier add $p ${p}-cache
897 osd tier cache-mode ${p}-cache writeback
898 osd tier set-overlay $p ${p}-cache
905 while [ -n "$*" ]; do
910 echo "setting hit_set on pool $pool type $type ..."
912 osd pool set $pool hit_set_type $type
913 osd pool set $pool hit_set_count 8
914 osd pool set $pool hit_set_period 30
923 local akey
='0555b35654ad1656d804'
924 local skey
='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
925 echo "setting up user testid"
926 $CEPH_BIN/radosgw-admin user create
--uid testid
--access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com
-c $conf_fn > /dev
/null
928 # Create S3-test users
929 # See: https://github.com/ceph/s3-tests
930 echo "setting up s3-test users"
931 $CEPH_BIN/radosgw-admin user create \
932 --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
933 --access-key ABCDEFGHIJKLMNOPQRST \
934 --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
935 --display-name youruseridhere \
936 --email s3@example.com
-c $conf_fn > /dev
/null
937 $CEPH_BIN/radosgw-admin user create \
938 --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
939 --access-key NOPQRSTUVWXYZABCDEFG \
940 --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
941 --display-name john.doe \
942 --email john.doe@example.com
-c $conf_fn > /dev
/null
943 $CEPH_BIN/radosgw-admin user create \
945 --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
946 --access-key HIJKLMNOPQRSTUVWXYZA \
947 --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
948 --display-name tenanteduser \
949 --email tenanteduser@example.com
-c $conf_fn > /dev
/null
952 echo "setting up user tester"
953 $CEPH_BIN/radosgw-admin user create
-c $conf_fn --subuser=test:tester
--display-name=Tester-Subuser
--key-type=swift
--secret=testing
--access=full
> /dev
/null
957 echo " access key: $akey"
958 echo " secret key: $skey"
960 echo "Swift User Info:"
961 echo " account : test"
962 echo " user : tester"
963 echo " password : testing"
967 echo start rgw on http
://localhost
:$CEPH_RGW_PORT
969 if [ "$debug" -ne 0 ]; then
970 RGWDEBUG
="--debug-rgw=20"
974 [ $CEPH_RGW_PORT -lt 1024 ] && RGWSUDO
=sudo
975 n
=$
(($CEPH_NUM_RGW - 1))
976 for rgw
in `seq 0 $n`; do
977 run
'rgw' $RGWSUDO $CEPH_BIN/radosgw
-c $conf_fn --log-file=${CEPH_OUT_DIR}/rgw.
$rgw.log
${RGWDEBUG} --debug-ms=1
980 if [ "$CEPH_NUM_RGW" -gt 0 ]; then
984 echo "started. stop.sh to stop. see out/* (e.g. 'tail -f out/????') for debug output."
987 echo "dashboard urls: $DASH_URLS"
988 echo " restful urls: $RESTFUL_URLS"
989 echo " w/ user/pass: admin / $RESTFUL_SECRET"
991 echo "export PYTHONPATH=./pybind:$PYTHONPATH"
992 echo "export LD_LIBRARY_PATH=$CEPH_LIB"
994 if [ "$CEPH_DIR" != "$PWD" ]; then
995 echo "export CEPH_CONF=$conf_fn"
996 echo "export CEPH_KEYRING=$keyring_fn"