if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out

# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=`grep ceph_SOURCE_DIR CMakeCache.txt | cut -d "=" -f 2`
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
elif [ -n "$CEPH_ROOT" ]; then
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BUILD_DIR/bin/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
if [ -z "${CEPH_VSTART_WRAPPER}" ]; then

[ -z "$PYBIND" ] && PYBIND=./pybind
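# Make the locally built Python bindings and shared libraries visible to
# anything launched from this script.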
export PYTHONPATH=$PYBIND:$CEPH_LIB/cython_modules/lib.2:$PYTHONPATH
export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
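# Daemon counts may also be seeded from the MON/OSD/MDS/MGR/FS/RGW
# environment variables.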
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
# if none of the CEPH_NUM_* numbers is specified, kill the existing cluster
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" ]; then
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
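# Keep the default pool replication size at 3 even when more OSDs are
# requested; with 3 or fewer OSDs the size is capped at the OSD count.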
if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
cephx=1 # turn cephx on by default
rgw_frontend="civetweb"
lockdep=${LOCKDEP:-1}

VSTART_SEC="client.vstart.sh"

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
usage="usage: $0 [option]... \nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
usage=$usage"\t-i <ip>: bind to specific ip\n"
usage=$usage"\t-n, --new\n"
usage=$usage"\t-N, --not-new: reuse existing cluster config (default)\n"
usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage=$usage"\t--smallmds: limit mds cache size\n"
usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
usage=$usage"\t-k keep old configuration files\n"
usage=$usage"\t-x enable cephx (on by default)\n"
usage=$usage"\t-X disable cephx\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e : create an erasure pool\n"
usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
usage=$usage"\t--mon_num specify ceph monitor count\n"
usage=$usage"\t--osd_num specify ceph osd count\n"
usage=$usage"\t--mds_num specify ceph mds count\n"
usage=$usage"\t--rgw_num specify ceph rgw count\n"
usage=$usage"\t--mgr_num specify ceph mgr count\n"
usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend\n"
usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
usage=$usage"\t--nolockdep disable lockdep\n"
usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
while [ $# -ge 1 ]; do

    [ -z "$2" ] && usage_exit
    [ -z "$2" ] && usage_exit
    [ -z "$2" ] && usage_exit
    [ -z "$2" ] && usage_exit
    [ -z "$2" ] && usage_exit
    [ -z "$2" ] && usage_exit
    [ -z "$2" ] && usage_exit
    [ -z "$2" ] && usage_exit
    cephx=1 # this is on by default, flag exists for historical consistency
    if [ ! -r $conf_fn ]; then
        echo "cannot use old configuration: $conf_fn not readable." >&2

    hitset="$hitset $2 $3"
    extra_conf="$extra_conf $2
"
287 if [ -z "$cache" ]; then
307 if [ $kill_all -eq 1 ]; then
308 $SUDO $INIT_CEPH stop
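# When reusing an existing ceph.conf, read the daemon counts back out of it
# with ceph-conf so restarts match the original cluster layout.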
311 if [ "$overwrite_conf" -eq 0 ]; then
312 MON
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mon 2>/dev/null` && \
314 OSD
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_osd 2>/dev/null` && \
316 MDS
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mds 2>/dev/null` && \
318 MGR
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mgr 2>/dev/null` && \
320 RGW
=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_rgw 2>/dev/null` && \
323 if [ "$new" -ne 0 ]; then
325 [ -e "$conf_fn" ] && rm -- "$conf_fn"
327 # -k is implied... (doesn't make sense otherwise)
347 eval "valg=\$valgrind_$type"
348 [ -z "$valg" ] && valg
="$valgrind"
350 if [ -n "$valg" ]; then
351 prunb valgrind
--tool="$valg" $valgrind_args "$@" -f
354 if [ "$nodaemon" -eq 0 ]; then
357 prunb .
/ceph-run
"$@" -f
363 if [ "$overwrite_conf" -eq 1 ]; then
370 log file = $CEPH_OUT_DIR/\$name.log
371 admin socket = $CEPH_OUT_DIR/\$name.asok
373 pid file = $CEPH_OUT_DIR/\$name.pid
374 heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
378 ; generated by vstart.sh on `date`
380 num mon = $CEPH_NUM_MON
381 num osd = $CEPH_NUM_OSD
382 num mds = $CEPH_NUM_MDS
383 num mgr = $CEPH_NUM_MGR
384 num rgw = $CEPH_NUM_RGW
389 osd pgp bits = 5 ; (invalid, but ceph should cope!)
390 osd pool default size = $OSD_POOL_DEFAULT_SIZE
391 osd crush chooseleaf type = 0
392 osd pool default min size = 1
393 osd failsafe full ratio = .99
394 mon osd reporter subtree level = osd
395 mon osd full ratio = .99
396 mon data avail warn = 10
397 mon data avail crit = 1
398 erasure code dir = $EC_PATH
399 plugin dir = $CEPH_LIB
400 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
401 rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
402 filestore fd cache size = 32
403 run dir = $CEPH_OUT_DIR
404 enable experimental unrecoverable data corrupting features = *
406 if [ "$lockdep" -eq 1 ] ; then
411 if [ "$cephx" -eq 1 ] ; then
413 auth cluster required = cephx
414 auth service required = cephx
415 auth client required = cephx
419 auth cluster required = none
420 auth service required = none
421 auth client required = none
424 if [ "$short" -eq 1 ]; then
425 COSDSHORT
=" osd max object name len = 460
426 osd max object namespace len = 64"
430 keyring = $keyring_fn
431 log file = $CEPH_OUT_DIR/\$name.\$pid.log
432 admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok
437 mds debug frag = true
438 mds debug auth pins = true
439 mds debug subtrees = true
440 mds data = $CEPH_DEV_DIR/mds.\$id
441 mds root ino uid = `id -u`
442 mds root ino gid = `id -g`
445 mgr modules = rest fsstatus
446 mgr data = $CEPH_DEV_DIR/mgr.\$id
447 mgr module path = $MGR_PYTHON_PATH
448 mon reweight min pgs per osd = 4
454 osd_check_max_object_name_len_on_startup = false
455 osd data = $CEPH_DEV_DIR/osd\$id
456 osd journal = $CEPH_DEV_DIR/osd\$id/journal
457 osd journal size = 100
459 osd class dir = $OBJCLASS_PATH
460 osd class load list = *
461 osd class default list = *
462 osd scrub load threshold = 2000.0
463 osd debug op order = true
464 osd debug misdirected ops = true
465 filestore wbthrottle xfs ios start flusher = 10
466 filestore wbthrottle xfs ios hard limit = 20
467 filestore wbthrottle xfs inodes hard limit = 30
468 filestore wbthrottle btrfs ios start flusher = 10
469 filestore wbthrottle btrfs ios hard limit = 20
470 filestore wbthrottle btrfs inodes hard limit = 30
471 osd copyfrom max chunk = 524288
472 bluestore fsck on mount = true
473 bluestore block create = true
474 bluestore block db size = 67108864
475 bluestore block db create = true
476 bluestore block wal size = 1048576000
477 bluestore block wal create = true
483 mon pg warn min per osd = 3
484 mon osd allow primary affinity = true
485 mon osd allow pg upmap = true
486 mon reweight min pgs per osd = 4
487 mon osd prime pg temp = true
488 crushtool = $CEPH_BIN/crushtool
489 mon allow pool delete = true
493 mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
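# Pick monitor names from a, b, c, ... until CEPH_NUM_MON have been chosen.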
for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    [ $count -eq $CEPH_NUM_MON ] && break;
    count=$(($count + 1))
514 if [ "$new" -eq 1 ]; then
515 if [ `echo $IP | grep '^127\\.'` ]
518 echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
519 echo " connect. either adjust /etc/hosts, or edit this script to use your"
520 echo " machine's real IP."
524 prun
$SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon.
"$keyring_fn" --cap mon
'allow *'
525 prun
$SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin
--set-uid=0 \
526 --cap mon
'allow *' \
527 --cap osd
'allow *' \
528 --cap mds
'allow *' \
529 --cap mgr
'allow *' \
# build a fresh fs monmap, mon fs
str="$str --add $f $IP:$(($CEPH_PORT+$count))"

mon data = $CEPH_DEV_DIR/mon.$f
mon addr = $IP:$(($CEPH_PORT+$count))

count=$(($count + 1))

prun "$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"

prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"

prun rm -- "$monmap_fn"

run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
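# Create (on -n/--new) and start each OSD: allocate an id, add it to the
# crush map, run --mkfs/--mkkey, and register its key with the cluster.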
for osd in `seq 0 $((CEPH_NUM_OSD-1))`

    if [ "$new" -eq 1 ]; then

        rm -rf $CEPH_DEV_DIR/osd$osd || true
        if command -v btrfs > /dev/null; then
            for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done

        mkdir -p $CEPH_DEV_DIR/osd$osd

        echo "add osd$osd $uuid"
        ceph_adm osd create $uuid
        ceph_adm osd crush add osd.$osd 1.0 host=$HOSTNAME root=default
        $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --mkkey --osd-uuid $uuid

        local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
        echo adding osd$osd key to auth repository
        ceph_adm -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow profile osd"

    run 'osd' $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
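# Create a keyring for each mgr daemon (on a new cluster) and start it.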
for name in x y z a b c d e f g h i j k l m n o p

    [ $mgr -eq $CEPH_NUM_MGR ] && break

    if [ "$new" -eq 1 ]; then
        mkdir -p $CEPH_DEV_DIR/mgr.$name
        key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
        $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
        ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

    echo "Starting mgr.${name}"
    run 'mgr' $CEPH_BIN/ceph-mgr -i $name $ARGS
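# On a new cluster, create CEPH_NUM_FS filesystems, each backed by its own
# data and metadata pool; multiple filesystems need the enable_multiple flag.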
if [ $new -eq 1 ]; then
    if [ "$CEPH_NUM_FS" -gt "0" ] ; then
        if [ "$CEPH_NUM_FS" -gt "1" ] ; then
            ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it

        for name in a b c d e f g h i j k l m n o p

            ceph_adm osd pool create "cephfs_data_${name}" 8
            ceph_adm osd pool create "cephfs_metadata_${name}" 8
            ceph_adm fs new "cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"

            [ $fs -eq $CEPH_NUM_FS ] && break
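# Create keyrings and start the MDS daemons; with -s/--standby_mds, each
# active MDS also gets a standby-replay "<name>s" counterpart.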
for name in a b c d e f g h i j k l m n o p

    [ $mds -eq $CEPH_NUM_MDS ] && break

    if [ "$new" -eq 1 ]; then
        prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
        key_fn=$CEPH_DEV_DIR/mds.$name/keyring

        if [ "$standby" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mds.${name}s

mds standby for rank = $mds

mds standby replay = true
mds standby for name = ${name}

        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
        ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
        if [ "$standby" -eq 1 ]; then
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                "$CEPH_DEV_DIR/mds.${name}s/keyring"
            ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'

    run 'mds' $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
    if [ "$standby" -eq 1 ]; then
        run 'mds' $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS

#valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
#$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
#ceph_adm mds set max_mds 2
680 if [ "$debug" -eq 0 ]; then
689 echo "** going verbose **"
716 mds debug scatterstat = true
717 mds verify scatter = true
718 mds log max segments = 2'
725 if [ -n "$MON_ADDR" ]; then
726 CMON_ARGS
=" -m "$MON_ADDR
727 COSD_ARGS
=" -m "$MON_ADDR
728 CMDS_ARGS
=" -m "$MON_ADDR
731 if [ "$memstore" -eq 1 ]; then
733 osd objectstore = memstore'
735 if [ "$bluestore" -eq 1 ]; then
737 osd objectstore = bluestore'
740 if [ -z "$CEPH_PORT" ]; then
742 [ -e ".ceph_port" ] && CEPH_PORT
=`cat .ceph_port`
745 [ -z "$INIT_CEPH" ] && INIT_CEPH
=$CEPH_BIN/init-ceph
748 test -d $CEPH_DEV_DIR/osd
0/.
&& test -e $CEPH_DEV_DIR/sudo
&& SUDO
="sudo"
750 prun
$SUDO rm -f core
*
752 test -d $CEPH_OUT_DIR || mkdir
$CEPH_OUT_DIR
753 test -d $CEPH_DEV_DIR || mkdir
$CEPH_DEV_DIR
754 $SUDO rm -rf $CEPH_OUT_DIR/*
755 test -d gmon
&& $SUDO rm -rf gmon
/*
757 [ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && test -e $keyring_fn && rm $keyring_fn
760 # figure machine's ip
761 HOSTNAME
=`hostname -s`
762 if [ -n "$ip" ]; then
765 echo hostname
$HOSTNAME
766 if [ -x "$(which ip 2>/dev/null)" ]; then
771 # filter out IPv6 and localhost addresses
772 IP
="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
773 # if nothing left, try using localhost address, it might work
774 if [ -z "$IP" ]; then IP
="127.0.0.1"; fi
777 echo "port $CEPH_PORT"
780 [ -z $CEPH_ADM ] && CEPH_ADM
=$CEPH_BIN/ceph
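# ceph_adm runs the ceph CLI against the generated conf, adding the keyring
# only when cephx is enabled.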
783 if [ "$cephx" -eq 1 ]; then
784 prun
$SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
786 prun
$SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
790 if [ "$new" -eq 1 ]; then
794 if [ $CEPH_NUM_MON -gt 0 ]; then
799 if [ $CEPH_NUM_OSD -gt 0 ]; then
804 if [ "$smallmds" -eq 1 ]; then
807 mds log max segments = 2
808 mds cache size = 10000
812 if [ $CEPH_NUM_MDS -gt 0 ]; then
816 # Don't set max_mds until all the daemons are started, otherwise
817 # the intended standbys might end up in active roles.
818 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
819 sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
822 for name
in a b c d e f g h i j k l m n o p
824 [ $fs -eq $CEPH_NUM_FS ] && break
826 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
827 ceph_adm fs
set "cephfs_${name}" allow_multimds true
--yes-i-really-mean-it
828 ceph_adm fs
set "cephfs_${name}" max_mds
"$CEPH_MAX_MDS"
834 if [ $CEPH_NUM_MGR -gt 0 ]; then
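# With -e, create a sample erasure-coded pool backed by a k=2 m=2 profile.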
838 if [ "$ec" -eq 1 ]; then
840 osd erasure-code-profile set ec-profile m=2 k=2
841 osd pool create ec 8 8 erasure ec-profile
846 while [ -n "$*" ]; do
849 echo "creating cache for pool $p ..."
851 osd pool create ${p}-cache 8
852 osd tier add $p ${p}-cache
853 osd tier cache-mode ${p}-cache writeback
854 osd tier set-overlay $p ${p}-cache
861 while [ -n "$*" ]; do
866 echo "setting hit_set on pool $pool type $type ..."
868 osd pool set $pool hit_set_type $type
869 osd pool set $pool hit_set_count 8
870 osd pool set $pool hit_set_period 30
879 local akey
='0555b35654ad1656d804'
880 local skey
='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
881 echo "setting up user testid"
882 $CEPH_BIN/radosgw-admin user create
--uid testid
--access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com
-c $conf_fn > /dev
/null
884 # Create S3-test users
885 # See: https://github.com/ceph/s3-tests
886 echo "setting up s3-test users"
887 $CEPH_BIN/radosgw-admin user create \
888 --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
889 --access-key ABCDEFGHIJKLMNOPQRST \
890 --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
891 --display-name youruseridhere \
892 --email s3@example.com
-c $conf_fn > /dev
/null
893 $CEPH_BIN/radosgw-admin user create \
894 --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
895 --access-key NOPQRSTUVWXYZABCDEFG \
896 --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
897 --display-name john.doe \
898 --email john.doe@example.com
-c $conf_fn > /dev
/null
901 echo "setting up user tester"
902 $CEPH_BIN/radosgw-admin user create
-c $conf_fn --subuser=test:tester
--display-name=Tester-Subuser
--key-type=swift
--secret=testing
--access=full
> /dev
/null
906 echo " access key: $akey"
907 echo " secret key: $skey"
909 echo "Swift User Info:"
910 echo " account : test"
911 echo " user : tester"
912 echo " password : testing"
916 echo start rgw on http
://localhost
:$CEPH_RGW_PORT
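# Start one radosgw per requested RGW instance; verbose rgw logging is added
# in debug mode, and sudo is used when binding to a privileged port.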
918 if [ "$debug" -ne 0 ]; then
919 RGWDEBUG
="--debug-rgw=20"
923 [ $CEPH_RGW_PORT -lt 1024 ] && RGWSUDO
=sudo
924 n
=$
(($CEPH_NUM_RGW - 1))
925 for rgw
in `seq 0 $n`; do
926 run
'rgw' $RGWSUDO $CEPH_BIN/radosgw
-c $conf_fn --log-file=${CEPH_OUT_DIR}/rgw.
$rgw.log
${RGWDEBUG} --debug-ms=1
929 if [ "$CEPH_NUM_RGW" -gt 0 ]; then
933 echo "started. stop.sh to stop. see out/* (e.g. 'tail -f out/????') for debug output."
936 echo "export PYTHONPATH=./pybind:$PYTHONPATH"
937 echo "export LD_LIBRARY_PATH=$CEPH_LIB"
939 if [ "$CEPH_DIR" != "$PWD" ]; then
940 echo "export CEPH_CONF=$conf_fn"
941 echo "export CEPH_KEYRING=$keyring_fn"