# Source: git.proxmox.com Git — ceph.git, blob ceph/src/vstart.sh
# Commit context: "update sources to v12.1.1"
#!/bin/bash

# abort on failure
set -e

# VSTART_DEST redirects the cluster state (conf, dev/, out/) into an external
# directory while binaries are still taken from the current directory.
if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`dirname $0`
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${PWD}/bin
    CEPH_LIB=${PWD}/lib

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
fi

# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=`grep ceph_SOURCE_DIR CMakeCache.txt | cut -d "=" -f 2`
    CEPH_BUILD_DIR=`pwd`
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
elif [ -n "$CEPH_ROOT" ]; then
    # CMake build tree (CEPH_ROOT learned above): everything under build dir
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BUILD_DIR/bin/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
fi
42
# Prefer binaries in the current directory over the system PATH, unless the
# vstart wrapper already arranged the environment.
if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi

[ -z "$PYBIND" ] && PYBIND=./pybind

export PYTHONPATH=$PYBIND:$CEPH_LIB/cython_modules/lib.2:$PYTHONPATH
export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
# Suppress logging for regular use that indicated that we are using a
# development version. vstart.sh is only used during testing and
# development
export CEPH_DEV=1

# Short env-var spellings (e.g. MON=1 OSD=1 ./vstart.sh) seed the CEPH_NUM_*
# knobs; command-line options parsed below may still override them.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"

# if none of the CEPH_NUM_* number is specified, kill the existing
# cluster.
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" ]; then
    kill_all=1
else
    kill_all=0
fi

# Daemon-count defaults when nothing was specified.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR

# Pool replication can't exceed the osd count; cap the default size at 3.
if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
94
95 extra_conf=""
96 new=0
97 standby=0
98 debug=0
99 ip=""
100 nodaemon=0
101 smallmds=0
102 short=0
103 ec=0
104 hitset=""
105 overwrite_conf=1
106 cephx=1 #turn cephx on by default
107 cache=""
108 memstore=0
109 bluestore=0
110 rgw_frontend="civetweb"
111 rgw_compression=""
112 lockdep=${LOCKDEP:-1}
113
114 filestore_path=
115
116 VSTART_SEC="client.vstart.sh"
117
118 MON_ADDR=""
119 DASH_URLS=""
120 RESTFUL_URLS=""
121
122 conf_fn="$CEPH_CONF_PATH/ceph.conf"
123 keyring_fn="$CEPH_CONF_PATH/keyring"
124 osdmap_fn="/tmp/ceph_osdmap.$$"
125 monmap_fn="/tmp/ceph_monmap.$$"
126
127 usage="usage: $0 [option]... \nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
128 usage=$usage"options:\n"
129 usage=$usage"\t-d, --debug\n"
130 usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
131 usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
132 usage=$usage"\t-i <ip>: bind to specific ip\n"
133 usage=$usage"\t-n, --new\n"
134 usage=$usage"\t-N, --not-new: reuse existing cluster config (default)\n"
135 usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
136 usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
137 usage=$usage"\t--smallmds: limit mds cache size\n"
138 usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
139 usage=$usage"\t-k keep old configuration files\n"
140 usage=$usage"\t-x enable cephx (on by default)\n"
141 usage=$usage"\t-X disable cephx\n"
142 usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
143 usage=$usage"\t-e : create an erasure pool\n";
144 usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
145 usage=$usage"\t--mon_num specify ceph monitor count\n"
146 usage=$usage"\t--osd_num specify ceph osd count\n"
147 usage=$usage"\t--mds_num specify ceph mds count\n"
148 usage=$usage"\t--rgw_num specify ceph rgw count\n"
149 usage=$usage"\t--mgr_num specify ceph mgr count\n"
150 usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
151 usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
152 usage=$usage"\t--rgw_compression specify the rgw compression plugin\n"
153 usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend\n"
154 usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
155 usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
156 usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
157 usage=$usage"\t--nolockdep disable lockdep\n"
158 usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
159
# Print the usage text (expanding the \n escapes embedded in $usage) to
# stdout and terminate the script with status 0.
usage_exit() {
    printf '%b' "$usage"
    exit
}
164
# Parse command-line switches. Options that take a value consume it with an
# extra shift inside their arm; the trailing shift eats the option itself.
while [ $# -ge 1 ]; do
    case $1 in
    -d | --debug )
        debug=1
        ;;
    -s | --standby_mds)
        standby=1
        ;;
    -l | --localhost )
        ip="127.0.0.1"
        ;;
    -i )
        [ -z "$2" ] && usage_exit
        ip="$2"
        shift
        ;;
    -e )
        ec=1
        ;;
    --new | -n )
        new=1
        ;;
    --not-new | -N )
        new=0
        ;;
    --short )
        short=1
        ;;
    --valgrind )
        [ -z "$2" ] && usage_exit
        valgrind=$2
        shift
        ;;
    --valgrind_args )
        valgrind_args="$2"
        shift
        ;;
    --valgrind_mds )
        [ -z "$2" ] && usage_exit
        valgrind_mds=$2
        shift
        ;;
    --valgrind_osd )
        [ -z "$2" ] && usage_exit
        valgrind_osd=$2
        shift
        ;;
    --valgrind_mon )
        [ -z "$2" ] && usage_exit
        valgrind_mon=$2
        shift
        ;;
    --valgrind_mgr )
        [ -z "$2" ] && usage_exit
        valgrind_mgr=$2
        shift
        ;;
    --valgrind_rgw )
        [ -z "$2" ] && usage_exit
        valgrind_rgw=$2
        shift
        ;;
    --nodaemon )
        nodaemon=1
        ;;
    --smallmds )
        smallmds=1
        ;;
    --mon_num )
        echo "mon_num:$2"
        CEPH_NUM_MON="$2"
        shift
        ;;
    --osd_num )
        CEPH_NUM_OSD=$2
        shift
        ;;
    --mds_num )
        CEPH_NUM_MDS=$2
        shift
        ;;
    --rgw_num )
        CEPH_NUM_RGW=$2
        shift
        ;;
    --mgr_num )
        CEPH_NUM_MGR=$2
        shift
        ;;
    --rgw_port )
        CEPH_RGW_PORT=$2
        shift
        ;;
    --rgw_frontend )
        rgw_frontend=$2
        shift
        ;;
    --rgw_compression )
        rgw_compression=$2
        shift
        ;;
    --filestore_path )
        filestore_path=$2
        shift
        ;;
    -m )
        [ -z "$2" ] && usage_exit
        MON_ADDR=$2
        shift
        ;;
    -x )
        cephx=1 # this is on be default, flag exists for historical consistency
        ;;
    -X )
        cephx=0
        ;;
    -k )
        # Reuse the existing conf; refuse early if it is missing/unreadable.
        if [ ! -r $conf_fn ]; then
            echo "cannot use old configuration: $conf_fn not readable." >&2
            exit
        fi
        overwrite_conf=0
        ;;
    --memstore )
        memstore=1
        ;;
    -b | --bluestore )
        bluestore=1
        ;;
    --hitset )
        # Accumulates "<pool> <type>" pairs; consumed by do_hitsets at the end.
        hitset="$hitset $2 $3"
        shift
        shift
        ;;
    -o )
        # Extra conf text appended to several sections; the embedded newline
        # keeps successive -o values on separate conf lines.
        extra_conf="$extra_conf $2
"
        shift
        ;;
    --cache )
        # Space-separated list of pools to put a cache tier on (do_cache).
        if [ -z "$cache" ]; then
            cache="$2"
        else
            cache="$cache $2"
        fi
        shift
        ;;
    --nolockdep )
        lockdep=0
        ;;
    --multimds)
        CEPH_MAX_MDS="$2"
        shift
        ;;
    * )
        usage_exit
    esac
    shift
done
324
# No CEPH_NUM_* given at all: just stop the running cluster.
if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi

# Keeping the old conf (-k): recover the daemon counts vstart recorded in the
# $VSTART_SEC section of the existing ceph.conf. Each `&&` only overwrites
# the count if ceph-conf actually found the key.
if [ "$overwrite_conf" -eq 0 ]; then
    MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mon 2>/dev/null` && \
        CEPH_NUM_MON="$MON"
    OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_osd 2>/dev/null` && \
        CEPH_NUM_OSD="$OSD"
    MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mds 2>/dev/null` && \
        CEPH_NUM_MDS="$MDS"
    MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mgr 2>/dev/null` && \
        CEPH_NUM_MGR="$MGR"
    RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_rgw 2>/dev/null` && \
        CEPH_NUM_RGW="$RGW"
else
    if [ "$new" -ne 0 ]; then
        # only delete if -n
        [ -e "$conf_fn" ] && rm -- "$conf_fn"
    else
        # -k is implied... (doesn't make sense otherwise)
        overwrite_conf=0
    fi
fi

# Common args passed to every daemon invocation below.
ARGS="-c $conf_fn"
351
# Echo the command line (suffixed with '&') and run it in the background.
prunb() {
    printf '%s &\n' "$*"
    "$@" &
}
356
# Echo the command line, then run it in the foreground.
prun() {
    printf '%s\n' "$*"
    "$@"
}
361
# Launch one daemon.  $1 is the daemon type (mon/osd/mds/mgr/rgw); the rest
# is the command line.  A per-type valgrind override (e.g. $valgrind_osd)
# wins over the global $valgrind; with no valgrind, run in the foreground
# unless --nodaemon asked for the ceph-run wrapper.
run() {
    local daemon_type=$1
    shift
    # Indirect lookup of the per-type override, e.g. ${valgrind_mon}.
    local vg_var="valgrind_$daemon_type"
    local valg=${!vg_var}
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    elif [ "$nodaemon" -eq 0 ]; then
        prun "$@"
    else
        prunb ./ceph-run "$@" -f
    fi
}
379
# Append stdin to the cluster conf file — but only when we own the conf
# (-k/--not-new set overwrite_conf=0 to keep the existing file untouched).
wconf() {
    if [ "$overwrite_conf" -ne 1 ]; then
        return 0
    fi
    cat >> "$conf_fn"
}
385
# Write the initial ceph.conf: the vstart bookkeeping section ($VSTART_SEC),
# [global] defaults suited to a throwaway dev cluster, and per-daemon-type
# sections.  Heredoc text is the literal conf content and must stay verbatim.
prepare_conf() {
    # Boilerplate shared by every daemon section; \$name/\$id expand later,
    # inside ceph's own conf parsing, not here.
    local DAEMONOPTS="
log file = $CEPH_OUT_DIR/\$name.log
admin socket = $CEPH_OUT_DIR/\$name.asok
chdir = \"\"
pid file = $CEPH_OUT_DIR/\$name.pid
heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
num mon = $CEPH_NUM_MON
num osd = $CEPH_NUM_OSD
num mds = $CEPH_NUM_MDS
num mgr = $CEPH_NUM_MGR
num rgw = $CEPH_NUM_RGW

[global]
fsid = $(uuidgen)
osd pg bits = 3
osd pgp bits = 5 ; (invalid, but ceph should cope!)
osd pool default size = $OSD_POOL_DEFAULT_SIZE
osd crush chooseleaf type = 0
osd pool default min size = 1
osd failsafe full ratio = .99
mon osd reporter subtree level = osd
mon osd full ratio = .99
mon data avail warn = 10
mon data avail crit = 1
erasure code dir = $EC_PATH
plugin dir = $CEPH_LIB
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
; needed for s3tests
rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl = false
rgw lc debug interval = 10
filestore fd cache size = 32
run dir = $CEPH_OUT_DIR
enable experimental unrecoverable data corrupting features = *
EOF
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
lockdep = true
EOF
    fi
    # cephx on/off decided by -x/-X; defaults to on.
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
EOF
    else
        wconf <<EOF
auth cluster required = none
auth service required = none
auth client required = none
EOF
    fi
    # --short: cap object name lengths (needed on ext4-backed dev dirs).
    if [ "$short" -eq 1 ]; then
        COSDSHORT=" osd max object name len = 460
osd max object namespace len = 64"
    fi
    wconf <<EOF
[client]
keyring = $keyring_fn
log file = $CEPH_OUT_DIR/\$name.\$pid.log
admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok

[client.rgw]

[mds]
$DAEMONOPTS
$CMDSDEBUG
mds debug frag = true
mds debug auth pins = true
mds debug subtrees = true
mds data = $CEPH_DEV_DIR/mds.\$id
mds root ino uid = `id -u`
mds root ino gid = `id -g`
$extra_conf
[mgr]
mgr data = $CEPH_DEV_DIR/mgr.\$id
mgr module path = $MGR_PYTHON_PATH
mon reweight min pgs per osd = 4
mon pg warn min per osd = 3
$DAEMONOPTS
$CMGRDEBUG
$extra_conf
[osd]
$DAEMONOPTS
osd_check_max_object_name_len_on_startup = false
osd data = $CEPH_DEV_DIR/osd\$id
osd journal = $CEPH_DEV_DIR/osd\$id/journal
osd journal size = 100
osd class tmp = out
osd class dir = $OBJCLASS_PATH
osd class load list = *
osd class default list = *
osd scrub load threshold = 2000.0
osd debug op order = true
osd debug misdirected ops = true
filestore wbthrottle xfs ios start flusher = 10
filestore wbthrottle xfs ios hard limit = 20
filestore wbthrottle xfs inodes hard limit = 30
filestore wbthrottle btrfs ios start flusher = 10
filestore wbthrottle btrfs ios hard limit = 20
filestore wbthrottle btrfs inodes hard limit = 30
osd copyfrom max chunk = 524288
bluestore fsck on mount = true
bluestore block create = true
bluestore block db size = 67108864
bluestore block db create = true
bluestore block wal size = 1048576000
bluestore block wal create = true
$COSDDEBUG
$COSDMEMSTORE
$COSDSHORT
$extra_conf
[mon]
mgr initial modules = restful status dashboard
mon pg warn min per osd = 3
mon osd allow primary affinity = true
mon reweight min pgs per osd = 4
mon osd prime pg temp = true
crushtool = $CEPH_BIN/crushtool
mon allow pool delete = true
$DAEMONOPTS
$CMONDEBUG
$extra_conf
mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
[global]
$extra_conf
EOF
}
522
# Pick mon ids, and for a new cluster (-n) build the keyring, write per-mon
# conf sections, create a monmap and mkfs each mon; then start the monitors.
start_mon() {
    local MONS=""
    local count=0
    # First $CEPH_NUM_MON letters become the mon ids (a b c ...).
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ];
        then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        # NOTE(review): '\\.'" inside single quotes reaches grep as '\.', i.e.
        # a literal dot — matches addresses starting with "127.".
        if [ `echo $IP | grep '^127\\.'` ]
        then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo " connect. either adjust /etc/hosts, or edit this script to use your"
            echo " machine's real IP."
            echo
        fi

        # Create mon., client.admin and client.rgw keys in a fresh keyring.
        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin --set-uid=0 \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.rgw \
            --cap mon 'allow rw' \
            --cap osd 'allow rwx' \
            --cap mgr 'allow rw' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        # Mons listen on consecutive ports starting at $CEPH_PORT.
        local str=""
        local count=0
        for f in $MONS
        do
            str="$str --add $f $IP:$(($CEPH_PORT+$count))"
            wconf <<EOF
[mon.$f]
host = $HOSTNAME
mon data = $CEPH_DEV_DIR/mon.$f
mon addr = $IP:$(($CEPH_PORT+$count))
EOF
            count=$(($count + 1))
        done
        prun "$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"

        # Wipe and mkfs each mon data dir from the fresh monmap + keyring.
        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done
}
594
# For each osd id: on a new cluster, wipe/prepare the data dir, register the
# osd with the cluster (id, crush position, auth key) and mkfs it; then start
# the daemon.
start_osd() {
    for osd in `seq 0 $((CEPH_NUM_OSD-1))`
    do
        if [ "$new" -eq 1 ]; then
            wconf <<EOF
[osd.$osd]
host = $HOSTNAME
EOF

            # Wipe previous data; btrfs subvolumes need an explicit delete
            # (best-effort — errors intentionally ignored).
            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$filestore_path" ]; then
                # --filestore_path: point the osd dir at pre-existing storage.
                ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
            fi

            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            ceph_adm osd create $uuid
            ceph_adm osd crush add osd.$osd 1.0 host=$HOSTNAME root=default
            $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --mkkey --osd-uuid $uuid

            # --mkkey above wrote the osd keyring; register it with the mons.
            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            echo adding osd$osd key to auth repository
            ceph_adm -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow profile osd"
        fi
        echo start osd.$osd
        run 'osd' $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
    done
}
628
# Create keys/conf for each mgr, assign dashboard and restful ports, start
# the daemons, then prime the restful module with a cert and admin API key.
start_mgr() {
    local mgr=0
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'
        fi

        wconf <<EOF
[mgr.$name]
host = $HOSTNAME
EOF

        # Dashboard gets one port, restful the next; steps of 1000 keep them
        # clear of the mon ports.
        ceph_adm config-key put mgr/dashboard/$name/server_port $MGR_PORT
        DASH_URLS+="http://$IP:$MGR_PORT/"
        MGR_PORT=$(($MGR_PORT + 1000))

        ceph_adm config-key put mgr/restful/$name/server_port $MGR_PORT

        RESTFUL_URLS+="https://$IP:$MGR_PORT"
        MGR_PORT=$(($MGR_PORT + 1000))

        echo "Starting mgr.${name}"
        run 'mgr' $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    # Restful module setup is best-effort: report rather than abort when the
    # module/package is unavailable.
    if ceph_adm tell mgr restful create-self-signed-cert; then
        SF=`mktemp`
        ceph_adm tell mgr restful create-key admin -o $SF
        RESTFUL_SECRET=`cat $SF`
        rm $SF
    else
        echo MGR Restful is not working, perhaps the package is not installed?
    fi
}
671
# On a new cluster, create $CEPH_NUM_FS filesystems (data+metadata pools),
# then create keys/conf for each mds (plus an optional standby-replay twin
# per active when -s was given) and start the daemons.
start_mds() {
    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            # More than one filesystem needs the multi-fs flag first.
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                ceph_adm osd pool create "cephfs_data_${name}" 8
                ceph_adm osd pool create "cephfs_metadata_${name}" 8
                ceph_adm fs new "cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi

    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
host = $HOSTNAME
EOF
            # Standby-replay twin "<name>s" follows the active mds "<name>".
            if [ "$standby" -eq 1 ]; then
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
mds standby for rank = $mds
[mds.${name}s]
mds standby replay = true
mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m  #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done
}
733
# Debug levels spliced into ceph.conf by prepare_conf: light by default,
# very chatty with -d/--debug. The leading newline in each value keeps the
# first key on its own conf line.
if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
debug mon = 10
debug ms = 1'
    COSDDEBUG='
debug ms = 1'
    CMDSDEBUG='
debug ms = 1'
    CMGRDEBUG='
debug ms = 1'
else
    echo "** going verbose **"
    CMONDEBUG='
debug mon = 20
debug paxos = 20
debug auth = 20
debug mgrc = 20
debug ms = 1'
    COSDDEBUG='
debug ms = 1
debug osd = 25
debug objecter = 20
debug monc = 20
debug mgrc = 20
debug journal = 20
debug filestore = 20
debug bluestore = 30
debug bluefs = 20
debug rocksdb = 10
debug bdev = 20
debug rgw = 20
debug objclass = 20'
    CMDSDEBUG='
debug ms = 1
debug mds = 20
debug auth = 20
debug monc = 20
debug mgrc = 20
mds debug scatterstat = true
mds verify scatter = true
mds log max segments = 2'
    CMGRDEBUG='
debug ms = 1
debug monc = 20
debug mon = 20
debug mgr = 20'
fi

# Explicit monitor address (-m) is forwarded to every daemon type.
if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi

if [ "$memstore" -eq 1 ]; then
    COSDMEMSTORE='
osd objectstore = memstore'
fi
# NOTE(review): --bluestore reuses COSDMEMSTORE, so when both --memstore and
# --bluestore are given, bluestore silently wins.
if [ "$bluestore" -eq 1 ]; then
    COSDMEMSTORE='
osd objectstore = bluestore'
fi

# Base mon port; a .ceph_port file lets parallel checkouts avoid collisions.
if [ -z "$CEPH_PORT" ]; then
    CEPH_PORT=6789
    [ -e ".ceph_port" ] && CEPH_PORT=`cat .ceph_port`
fi
801
[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
test -d $CEPH_DEV_DIR/osd0/. && test -e $CEPH_DEV_DIR/sudo && SUDO="sudo"

prun $SUDO rm -f core*

# Fresh out/ (logs, sockets, pids) and dev/ (daemon data) directories.
test -d $CEPH_OUT_DIR || mkdir $CEPH_OUT_DIR
test -d $CEPH_DEV_DIR || mkdir $CEPH_DEV_DIR
$SUDO rm -rf $CEPH_OUT_DIR/*
test -d gmon && $SUDO rm -rf gmon/*

# A new cephx cluster starts from an empty keyring.
[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && test -e $keyring_fn && rm $keyring_fn


# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    # Prefer iproute2's `ip addr`; fall back to ifconfig.
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # filter out IPv6 and localhost addresses
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing left, try using localhost address, it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"


# NOTE(review): $CEPH_ADM is unquoted inside [ ] — fine for the paths vstart
# generates, but would mis-parse a value containing spaces.
[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
838
# Run the ceph CLI against this cluster's conf, attaching the vstart keyring
# only when cephx authentication is enabled.
ceph_adm() {
    local -a auth_args=()
    if [ "$cephx" -eq 1 ]; then
        auth_args=(-k "$keyring_fn")
    fi
    prun $SUDO "$CEPH_ADM" -c "$conf_fn" "${auth_args[@]}" "$@"
}
846
# ---- bring the cluster up: conf, mons, osds, mds, mgr, optional ec pool ----

if [ "$new" -eq 1 ]; then
    prepare_conf
fi

if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon
fi

# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi

# mds
if [ "$smallmds" -eq 1 ]; then
    wconf <<EOF
[mds]
mds log max segments = 2
mds cache size = 10000
EOF
fi

if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
fi

# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
# Filesystem names mirror the a..p naming used in start_mds.
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "cephfs_${name}" allow_multimds true --yes-i-really-mean-it
        ceph_adm fs set "cephfs_${name}" max_mds "$CEPH_MAX_MDS"
    fi
done

# mgr

if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi

# -e: demo erasure-coded pool backed by a k=2 m=2 profile.
if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec 8 8 erasure ec-profile
EOF
fi
901
# For each pool name given, create a "<pool>-cache" pool and overlay it on
# the base pool as a writeback cache tier.
do_cache() {
    local pool
    for pool in "$@"; do
        echo "creating cache for pool $pool ..."
        ceph_adm <<EOF
osd pool create ${pool}-cache 8
osd tier add $pool ${pool}-cache
osd tier cache-mode ${pool}-cache writeback
osd tier set-overlay $pool ${pool}-cache
EOF
    done
}
do_cache $cache
916
# Consume (pool, hit_set_type) pairs accumulated by --hitset and apply the
# hit_set tracking settings to each pool.
do_hitsets() {
    local hs_pool hs_type
    while [ -n "$*" ]; do
        hs_pool="$1"
        hs_type="$2"
        shift 2
        echo "setting hit_set on pool $hs_pool type $hs_type ..."
        ceph_adm <<EOF
osd pool set $hs_pool hit_set_type $hs_type
osd pool set $hs_pool hit_set_count 8
osd pool set $hs_pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset
932
# Seed RGW with the well-known dev/test users: an S3 "testid" user, the three
# users expected by the s3-tests suite, and a Swift subuser. All credentials
# are fixed throwaway values for local testing only.
do_rgw_create_users()
{
    # Create S3 user
    local akey='0555b35654ad1656d804'
    local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo " access key: $akey"
    echo " secret key: $skey"
    echo ""
    echo "Swift User Info:"
    echo " account : test"
    echo " user : tester"
    echo " password : testing"
    echo ""
}
979
# On a new cluster, create the test users (and optional zone compression),
# then start $CEPH_NUM_RGW radosgw instances on consecutive ports.
do_rgw()
{
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi
    # Start server
    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20"
    fi

    # Ports below 1024 are privileged and need sudo.
    RGWSUDO=
    [ $CEPH_RGW_PORT -lt 1024 ] && RGWSUDO=sudo
    # NOTE(review): $n is computed here but never used — candidate for removal.
    n=$(($CEPH_NUM_RGW - 1))
    i=0
    # Instance ids come from the letters j..v; the count is $CEPH_NUM_RGW.
    for rgw in j k l m n o p q r s t u v; do
        echo start rgw on http://localhost:$((CEPH_RGW_PORT + i))
        run 'rgw' $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn --log-file=${CEPH_OUT_DIR}/rgw.$rgw.log ${RGWDEBUG} --debug-ms=1 -n client.rgw "--rgw_frontends=${rgw_frontend} port=$((CEPH_RGW_PORT + i))"
        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi

# Final summary: where the logs are, the mgr dashboard/restful endpoints,
# and the env vars a user should export to talk to this cluster.
echo "started. stop.sh to stop. see out/* (e.g. 'tail -f out/????') for debug output."

echo ""
echo "dashboard urls: $DASH_URLS"
echo " restful urls: $RESTFUL_URLS"
echo " w/ user/pass: admin / $RESTFUL_SECRET"
echo ""
echo "export PYTHONPATH=./pybind:$PYTHONPATH"
echo "export LD_LIBRARY_PATH=$CEPH_LIB"

# Only needed when conf/keyring are not in the default (current) directory.
if [ "$CEPH_DIR" != "$PWD" ]; then
    echo "export CEPH_CONF=$conf_fn"
    echo "export CEPH_KEYRING=$keyring_fn"
fi

echo "CEPH_DEV=1"
1026