#!/bin/bash

# abort on failure
set -e

if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`dirname $0`
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${PWD}/bin
    CEPH_LIB=${PWD}/lib

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
fi

# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=`grep ceph_SOURCE_DIR CMakeCache.txt | cut -d "=" -f 2`
    CEPH_BUILD_DIR=`pwd`
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
elif [ -n "$CEPH_ROOT" ]; then
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BUILD_DIR/bin/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
fi

if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi

[ -z "$PYBIND" ] && PYBIND=./pybind

export PYTHONPATH=$PYBIND:$CEPH_LIB/cython_modules/lib.2:$PYTHONPATH
export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
# Suppress the log warning, aimed at regular users, that a development
# version of Ceph is in use; vstart.sh is only used during testing and
# development.
export CEPH_DEV=1

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"

# if none of the CEPH_NUM_* counts is specified, kill the existing cluster.
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" ]; then
    kill_all=1
else
    kill_all=0
fi

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR

if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
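
# Note: the clamp above caps the default pool replication at 3, but for small
# clusters it drops to the OSD count, so e.g. a single-OSD run (CEPH_NUM_OSD=1)
# still gets "osd pool default size = 1" and can reach active+clean.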

extra_conf=""
new=0
standby=0
debug=0
ip=""
nodaemon=0
smallmds=0
short=0
ec=0
hitset=""
overwrite_conf=1
cephx=1 # turn cephx on by default
cache=""
memstore=0
bluestore=0
rgw_frontend="civetweb"
lockdep=${LOCKDEP:-1}

filestore_path=

VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"

usage="usage: $0 [option]... \nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
usage=$usage"\t-i <ip>: bind to specific ip\n"
usage=$usage"\t-n, --new\n"
usage=$usage"\t-N, --not-new: reuse existing cluster config (default)\n"
usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage=$usage"\t--smallmds: limit mds cache size\n"
usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
usage=$usage"\t-k keep old configuration files\n"
usage=$usage"\t-x enable cephx (on by default)\n"
usage=$usage"\t-X disable cephx\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e : create an erasure pool\n";
usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
usage=$usage"\t--mon_num specify ceph monitor count\n"
usage=$usage"\t--osd_num specify ceph osd count\n"
usage=$usage"\t--mds_num specify ceph mds count\n"
usage=$usage"\t--rgw_num specify ceph rgw count\n"
usage=$usage"\t--mgr_num specify ceph mgr count\n"
usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend\n"
usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
usage=$usage"\t--nolockdep disable lockdep\n"
usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"

usage_exit() {
    printf "$usage"
    exit
}
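
# Illustrative invocations (all flags and env vars are defined above):
#   ./vstart.sh -n -d                     # fresh cluster: 3 mon, 3 osd, 3 mds by default, verbose debug logs
#   MON=1 OSD=1 MDS=0 ./vstart.sh -n -l   # minimal single-node cluster bound to 127.0.0.1
#   ./vstart.sh -n -b --rgw_num 1         # fresh cluster with bluestore OSDs and one radosgw (port 8000 by default)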

while [ $# -ge 1 ]; do
    case $1 in
        -d | --debug )
            debug=1
            ;;
        -s | --standby_mds)
            standby=1
            ;;
        -l | --localhost )
            ip="127.0.0.1"
            ;;
        -i )
            [ -z "$2" ] && usage_exit
            ip="$2"
            shift
            ;;
        -e )
            ec=1
            ;;
        --new | -n )
            new=1
            ;;
        --not-new | -N )
            new=0
            ;;
        --short )
            short=1
            ;;
        --valgrind )
            [ -z "$2" ] && usage_exit
            valgrind=$2
            shift
            ;;
        --valgrind_args )
            valgrind_args="$2"
            shift
            ;;
        --valgrind_mds )
            [ -z "$2" ] && usage_exit
            valgrind_mds=$2
            shift
            ;;
        --valgrind_osd )
            [ -z "$2" ] && usage_exit
            valgrind_osd=$2
            shift
            ;;
        --valgrind_mon )
            [ -z "$2" ] && usage_exit
            valgrind_mon=$2
            shift
            ;;
        --valgrind_mgr )
            [ -z "$2" ] && usage_exit
            valgrind_mgr=$2
            shift
            ;;
        --valgrind_rgw )
            [ -z "$2" ] && usage_exit
            valgrind_rgw=$2
            shift
            ;;
        --nodaemon )
            nodaemon=1
            ;;
        --smallmds )
            smallmds=1
            ;;
        --mon_num )
            echo "mon_num:$2"
            CEPH_NUM_MON="$2"
            shift
            ;;
        --osd_num )
            CEPH_NUM_OSD=$2
            shift
            ;;
        --mds_num )
            CEPH_NUM_MDS=$2
            shift
            ;;
        --rgw_num )
            CEPH_NUM_RGW=$2
            shift
            ;;
        --mgr_num )
            CEPH_NUM_MGR=$2
            shift
            ;;
        --rgw_port )
            CEPH_RGW_PORT=$2
            shift
            ;;
        --rgw_frontend )
            rgw_frontend=$2
            shift
            ;;
        --filestore_path )
            filestore_path=$2
            shift
            ;;
        -m )
            [ -z "$2" ] && usage_exit
            MON_ADDR=$2
            shift
            ;;
        -x )
            cephx=1 # this is on by default; the flag exists for historical consistency
            ;;
        -X )
            cephx=0
            ;;
        -k )
            if [ ! -r $conf_fn ]; then
                echo "cannot use old configuration: $conf_fn not readable." >&2
                exit
            fi
            overwrite_conf=0
            ;;
        --memstore )
            memstore=1
            ;;
        -b | --bluestore )
            bluestore=1
            ;;
        --hitset )
            hitset="$hitset $2 $3"
            shift
            shift
            ;;
        -o )
            extra_conf="$extra_conf $2
"
            shift
            ;;
        --cache )
            if [ -z "$cache" ]; then
                cache="$2"
            else
                cache="$cache $2"
            fi
            shift
            ;;
        --nolockdep )
            lockdep=0
            ;;
        --multimds)
            CEPH_MAX_MDS="$2"
            shift
            ;;
        * )
            usage_exit
    esac
    shift
done

if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi

if [ "$overwrite_conf" -eq 0 ]; then
    MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mon 2>/dev/null` && \
        CEPH_NUM_MON="$MON"
    OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_osd 2>/dev/null` && \
        CEPH_NUM_OSD="$OSD"
    MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mds 2>/dev/null` && \
        CEPH_NUM_MDS="$MDS"
    MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mgr 2>/dev/null` && \
        CEPH_NUM_MGR="$MGR"
    RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_rgw 2>/dev/null` && \
        CEPH_NUM_RGW="$RGW"
else
    if [ "$new" -ne 0 ]; then
        # only delete if -n
        [ -e "$conf_fn" ] && rm -- "$conf_fn"
    else
        # -k is implied... (doesn't make sense otherwise)
        overwrite_conf=0
    fi
fi

ARGS="-c $conf_fn"

prunb() {
    echo "$* &"
    "$@" &
}

prun() {
    echo "$*"
    "$@"
}

run() {
    type=$1
    shift
    eval "valg=\$valgrind_$type"
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    else
        if [ "$nodaemon" -eq 0 ]; then
            prun "$@"
        else
            prunb ./ceph-run "$@" -f
        fi
    fi
}

wconf() {
    if [ "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}
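
# How run() dispatches (summary of the helpers above): a per-daemon
# $valgrind_<type> (or the global $valgrind) wraps the command in valgrind and
# backgrounds it; with --nodaemon the daemon is kept in the foreground under
# ./ceph-run and backgrounded by prunb; otherwise prun starts it directly and
# the daemon forks itself.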

prepare_conf() {
    local DAEMONOPTS="
log file = $CEPH_OUT_DIR/\$name.log
admin socket = $CEPH_OUT_DIR/\$name.asok
chdir = \"\"
pid file = $CEPH_OUT_DIR/\$name.pid
heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
num mon = $CEPH_NUM_MON
num osd = $CEPH_NUM_OSD
num mds = $CEPH_NUM_MDS
num mgr = $CEPH_NUM_MGR
num rgw = $CEPH_NUM_RGW

[global]
fsid = $(uuidgen)
osd pg bits = 3
osd pgp bits = 5 ; (invalid, but ceph should cope!)
osd pool default size = $OSD_POOL_DEFAULT_SIZE
osd crush chooseleaf type = 0
osd pool default min size = 1
osd failsafe full ratio = .99
mon osd reporter subtree level = osd
mon osd full ratio = .99
mon data avail warn = 10
mon data avail crit = 1
erasure code dir = $EC_PATH
plugin dir = $CEPH_LIB
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
; needed for s3tests
rgw enable static website = 1
rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl = false
rgw lc debug interval = 10
filestore fd cache size = 32
run dir = $CEPH_OUT_DIR
enable experimental unrecoverable data corrupting features = *
EOF
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
lockdep = true
EOF
    fi
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
EOF
    else
        wconf <<EOF
auth cluster required = none
auth service required = none
auth client required = none
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT=" osd max object name len = 460
osd max object namespace len = 64"
    fi
    wconf <<EOF
[client]
keyring = $keyring_fn
log file = $CEPH_OUT_DIR/\$name.\$pid.log
admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok

[mds]
$DAEMONOPTS
$CMDSDEBUG
mds debug frag = true
mds debug auth pins = true
mds debug subtrees = true
mds data = $CEPH_DEV_DIR/mds.\$id
mds root ino uid = `id -u`
mds root ino gid = `id -g`
$extra_conf
[mgr]
mgr modules = restful fsstatus dashboard
mgr data = $CEPH_DEV_DIR/mgr.\$id
mgr module path = $MGR_PYTHON_PATH
mon reweight min pgs per osd = 4
mon pg warn min per osd = 3
$DAEMONOPTS
$CMGRDEBUG
$extra_conf
[osd]
$DAEMONOPTS
osd_check_max_object_name_len_on_startup = false
osd data = $CEPH_DEV_DIR/osd\$id
osd journal = $CEPH_DEV_DIR/osd\$id/journal
osd journal size = 100
osd class tmp = out
osd class dir = $OBJCLASS_PATH
osd class load list = *
osd class default list = *
osd scrub load threshold = 2000.0
osd debug op order = true
osd debug misdirected ops = true
filestore wbthrottle xfs ios start flusher = 10
filestore wbthrottle xfs ios hard limit = 20
filestore wbthrottle xfs inodes hard limit = 30
filestore wbthrottle btrfs ios start flusher = 10
filestore wbthrottle btrfs ios hard limit = 20
filestore wbthrottle btrfs inodes hard limit = 30
osd copyfrom max chunk = 524288
bluestore fsck on mount = true
bluestore block create = true
bluestore block db size = 67108864
bluestore block db create = true
bluestore block wal size = 1048576000
bluestore block wal create = true
$COSDDEBUG
$COSDMEMSTORE
$COSDSHORT
$extra_conf
[mon]
mon pg warn min per osd = 3
mon osd allow primary affinity = true
mon osd allow pg upmap = true
mon reweight min pgs per osd = 4
mon osd prime pg temp = true
crushtool = $CEPH_BIN/crushtool
mon allow pool delete = true
$DAEMONOPTS
$CMONDEBUG
$extra_conf
mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
[global]
$extra_conf
EOF
}
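
# prepare_conf writes everything to the single $conf_fn; every daemon started
# below picks it up through $ARGS ("-c $conf_fn").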

start_mon() {
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ];
        then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        if [ `echo $IP | grep '^127\\.'` ]
        then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo " connect. either adjust /etc/hosts, or edit this script to use your"
            echo " machine's real IP."
            echo
        fi

        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin --set-uid=0 \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local str=""
        local count=0
        for f in $MONS
        do
            str="$str --add $f $IP:$(($CEPH_PORT+$count))"
            wconf <<EOF
[mon.$f]
host = $HOSTNAME
mon data = $CEPH_DEV_DIR/mon.$f
mon addr = $IP:$(($CEPH_PORT+$count))
EOF
            count=$(($count + 1))
        done
        prun "$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"

        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done
}

start_osd() {
    for osd in `seq 0 $((CEPH_NUM_OSD-1))`
    do
        if [ "$new" -eq 1 ]; then
            wconf <<EOF
[osd.$osd]
host = $HOSTNAME
EOF

            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$filestore_path" ]; then
                ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
            fi

            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            ceph_adm osd create $uuid
            ceph_adm osd crush add osd.$osd 1.0 host=$HOSTNAME root=default
            $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --mkkey --osd-uuid $uuid

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            echo adding osd$osd key to auth repository
            ceph_adm -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow profile osd"
        fi
        echo start osd.$osd
        run 'osd' $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
    done
}

start_mgr() {
    local mgr=0
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'
        fi

        wconf <<EOF
[mgr.$name]
host = $HOSTNAME
EOF

        ceph_adm config-key put mgr/dashboard/$name/server_addr $IP
        ceph_adm config-key put mgr/dashboard/$name/server_port $MGR_PORT
        DASH_URLS+="http://$IP:$MGR_PORT/"
        MGR_PORT=$(($MGR_PORT + 1000))

        ceph_adm config-key put mgr/restful/$name/server_addr $IP
        ceph_adm config-key put mgr/restful/$name/server_port $MGR_PORT

        RESTFUL_URLS+="https://$IP:$MGR_PORT"
        MGR_PORT=$(($MGR_PORT + 1000))

        echo "Starting mgr.${name}"
        run 'mgr' $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    SF=`mktemp`
    ceph_adm tell mgr restful create-self-signed-cert
    ceph_adm tell mgr restful create-key admin -o $SF
    RESTFUL_SECRET=`cat $SF`
    rm $SF
}

start_mds() {
    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                ceph_adm osd pool create "cephfs_data_${name}" 8
                ceph_adm osd pool create "cephfs_metadata_${name}" 8
                ceph_adm fs new "cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi

    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
host = $HOSTNAME
EOF
            if [ "$standby" -eq 1 ]; then
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
mds standby for rank = $mds
[mds.${name}s]
mds standby replay = true
mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done
}

if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
debug mon = 10
debug ms = 1'
    COSDDEBUG='
debug ms = 1'
    CMDSDEBUG='
debug ms = 1'
    CMGRDEBUG='
debug ms = 1'
else
    echo "** going verbose **"
    CMONDEBUG='
debug mon = 20
debug paxos = 20
debug auth = 20
debug mgrc = 20
debug ms = 1'
    COSDDEBUG='
debug ms = 1
debug osd = 25
debug objecter = 20
debug monc = 20
debug mgrc = 20
debug journal = 20
debug filestore = 20
debug bluestore = 30
debug bluefs = 20
debug rocksdb = 10
debug bdev = 20
debug rgw = 20
debug objclass = 20'
    CMDSDEBUG='
debug ms = 1
debug mds = 20
debug auth = 20
debug monc = 20
debug mgrc = 20
mds debug scatterstat = true
mds verify scatter = true
mds log max segments = 2'
    CMGRDEBUG='
debug ms = 1
debug monc = 20
debug mon = 20
debug mgr = 20'
fi

if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi

if [ "$memstore" -eq 1 ]; then
    COSDMEMSTORE='
osd objectstore = memstore'
fi
if [ "$bluestore" -eq 1 ]; then
    COSDMEMSTORE='
osd objectstore = bluestore'
fi

if [ -z "$CEPH_PORT" ]; then
    CEPH_PORT=6789
    [ -e ".ceph_port" ] && CEPH_PORT=`cat .ceph_port`
fi

[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
test -d $CEPH_DEV_DIR/osd0/. && test -e $CEPH_DEV_DIR/sudo && SUDO="sudo"

prun $SUDO rm -f core*

test -d $CEPH_OUT_DIR || mkdir $CEPH_OUT_DIR
test -d $CEPH_DEV_DIR || mkdir $CEPH_DEV_DIR
$SUDO rm -rf $CEPH_OUT_DIR/*
test -d gmon && $SUDO rm -rf gmon/*

[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && test -e $keyring_fn && rm $keyring_fn


# figure out the machine's IP
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # filter out IPv6 and localhost addresses
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing is left, fall back to the localhost address; it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"


[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph

ceph_adm() {
    if [ "$cephx" -eq 1 ]; then
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
    else
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
    fi
}
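
# ceph_adm just pins the ceph CLI to this cluster's conf and keyring, so the
# equivalent manual call (assuming cephx is enabled) is roughly:
#   $CEPH_BIN/ceph -c $conf_fn -k $keyring_fn -s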

if [ "$new" -eq 1 ]; then
    prepare_conf
fi

if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon
fi

# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi

# mds
if [ "$smallmds" -eq 1 ]; then
    wconf <<EOF
[mds]
mds log max segments = 2
mds cache size = 10000
EOF
fi

if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
fi

# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "cephfs_${name}" allow_multimds true --yes-i-really-mean-it
        ceph_adm fs set "cephfs_${name}" max_mds "$CEPH_MAX_MDS"
    fi
done

# mgr

if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi

if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec 8 8 erasure ec-profile
EOF
fi

do_cache() {
    while [ -n "$*" ]; do
        p="$1"
        shift
        echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache 8
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache

do_hitsets() {
    while [ -n "$*" ]; do
        pool="$1"
        type="$2"
        shift
        shift
        echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset

do_rgw()
{
    # Create S3 user
    local akey='0555b35654ad1656d804'
    local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo " access key: $akey"
    echo " secret key: $skey"
    echo ""
    echo "Swift User Info:"
    echo " account : test"
    echo " user : tester"
    echo " password : testing"
    echo ""

    # Start server
    echo start rgw on http://localhost:$CEPH_RGW_PORT
    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20"
    fi

    RGWSUDO=
    [ $CEPH_RGW_PORT -lt 1024 ] && RGWSUDO=sudo
    n=$(($CEPH_NUM_RGW - 1))
    for rgw in `seq 0 $n`; do
        run 'rgw' $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn --log-file=${CEPH_OUT_DIR}/rgw.$rgw.log ${RGWDEBUG} --debug-ms=1
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi

echo "started. stop.sh to stop. see out/* (e.g. 'tail -f out/????') for debug output."

echo ""
echo "dashboard urls: $DASH_URLS"
echo "  restful urls: $RESTFUL_URLS"
echo "  w/ user/pass: admin / $RESTFUL_SECRET"
echo ""
echo "export PYTHONPATH=./pybind:$PYTHONPATH"
echo "export LD_LIBRARY_PATH=$CEPH_LIB"

if [ "$CEPH_DIR" != "$PWD" ]; then
    echo "export CEPH_CONF=$conf_fn"
    echo "export CEPH_KEYRING=$keyring_fn"
fi

echo "CEPH_DEV=1"
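
# After startup (illustrative; exact paths depend on your build layout):
#   export CEPH_CONF=ceph.conf CEPH_KEYRING=keyring   # as echoed above when run outside $CEPH_DIR
#   ./bin/ceph -c ceph.conf -k keyring -s             # check cluster health
#   ./stop.sh                                         # tear the cluster down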