]> git.proxmox.com Git - ceph.git/blame - ceph/src/vstart.sh
bump version to 12.0.3-pve3
[ceph.git] / ceph / src / vstart.sh
CommitLineData
7c673cae
FG
1#!/bin/sh
2
3# abort on failure
4set -e
5
6if [ -n "$VSTART_DEST" ]; then
7 SRC_PATH=`dirname $0`
8 SRC_PATH=`(cd $SRC_PATH; pwd)`
9
10 CEPH_DIR=$SRC_PATH
11 CEPH_BIN=${PWD}/bin
12 CEPH_LIB=${PWD}/lib
13
14 CEPH_CONF_PATH=$VSTART_DEST
15 CEPH_DEV_DIR=$VSTART_DEST/dev
16 CEPH_OUT_DIR=$VSTART_DEST/out
17fi
18
19# for running out of the CMake build directory
20if [ -e CMakeCache.txt ]; then
21 # Out of tree build, learn source location from CMakeCache.txt
22 CEPH_ROOT=`grep ceph_SOURCE_DIR CMakeCache.txt | cut -d "=" -f 2`
23 CEPH_BUILD_DIR=`pwd`
24 [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
25fi
26
27# use CEPH_BUILD_ROOT to vstart from a 'make install'
28if [ -n "$CEPH_BUILD_ROOT" ]; then
29 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
30 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
31 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
32 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
33elif [ -n "$CEPH_ROOT" ]; then
34 [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
35 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
36 [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
37 [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BUILD_DIR/bin/init-ceph
38 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
39 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
40 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
41fi
42
43if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
44 PATH=$(pwd):$PATH
45fi
46
47[ -z "$PYBIND" ] && PYBIND=./pybind
48
49export PYTHONPATH=$PYBIND:$CEPH_LIB/cython_modules/lib.2:$PYTHONPATH
50export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
51export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
52
53[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
54[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
55[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
56[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
57[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
58[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
59
60# if none of the CEPH_NUM_* number is specified, kill the existing
61# cluster.
62if [ -z "$CEPH_NUM_MON" -a \
63 -z "$CEPH_NUM_OSD" -a \
64 -z "$CEPH_NUM_MDS" -a \
65 -z "$CEPH_NUM_MGR" ]; then
66 kill_all=1
67else
68 kill_all=0
69fi
70
71[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
72[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
73[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
74[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
75[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
76[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
77[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
78
79[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
80[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
81[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
82[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
83[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
84
85if [ $CEPH_NUM_OSD -gt 3 ]; then
86 OSD_POOL_DEFAULT_SIZE=3
87else
88 OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
89fi
90
91extra_conf=""
92new=0
93standby=0
94debug=0
95ip=""
96nodaemon=0
97smallmds=0
98short=0
99ec=0
100hitset=""
101overwrite_conf=1
102cephx=1 #turn cephx on by default
103cache=""
104memstore=0
105bluestore=0
106rgw_frontend="civetweb"
107lockdep=${LOCKDEP:-1}
108
109VSTART_SEC="client.vstart.sh"
110
111MON_ADDR=""
112
113conf_fn="$CEPH_CONF_PATH/ceph.conf"
114keyring_fn="$CEPH_CONF_PATH/keyring"
115osdmap_fn="/tmp/ceph_osdmap.$$"
116monmap_fn="/tmp/ceph_monmap.$$"
117
118usage="usage: $0 [option]... \nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
119usage=$usage"options:\n"
120usage=$usage"\t-d, --debug\n"
121usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
122usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
123usage=$usage"\t-i <ip>: bind to specific ip\n"
124usage=$usage"\t-n, --new\n"
125usage=$usage"\t-N, --not-new: reuse existing cluster config (default)\n"
126usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
127usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
128usage=$usage"\t--smallmds: limit mds cache size\n"
129usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
130usage=$usage"\t-k keep old configuration files\n"
131usage=$usage"\t-x enable cephx (on by default)\n"
132usage=$usage"\t-X disable cephx\n"
133usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
134usage=$usage"\t-e : create an erasure pool\n";
135usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
136usage=$usage"\t--mon_num specify ceph monitor count\n"
137usage=$usage"\t--osd_num specify ceph osd count\n"
138usage=$usage"\t--mds_num specify ceph mds count\n"
139usage=$usage"\t--rgw_num specify ceph rgw count\n"
140usage=$usage"\t--mgr_num specify ceph mgr count\n"
141usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
142usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
143usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend\n"
144usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
145usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
146usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
147usage=$usage"\t--nolockdep disable lockdep\n"
148usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
149
150usage_exit() {
151 printf "$usage"
152 exit
153}
154
155while [ $# -ge 1 ]; do
156case $1 in
157 -d | --debug )
158 debug=1
159 ;;
160 -s | --standby_mds)
161 standby=1
162 ;;
163 -l | --localhost )
164 ip="127.0.0.1"
165 ;;
166 -i )
167 [ -z "$2" ] && usage_exit
168 ip="$2"
169 shift
170 ;;
171 -e )
172 ec=1
173 ;;
174 --new | -n )
175 new=1
176 ;;
177 --not-new | -N )
178 new=0
179 ;;
180 --short )
181 short=1
182 ;;
183 --valgrind )
184 [ -z "$2" ] && usage_exit
185 valgrind=$2
186 shift
187 ;;
188 --valgrind_args )
189 valgrind_args="$2"
190 shift
191 ;;
192 --valgrind_mds )
193 [ -z "$2" ] && usage_exit
194 valgrind_mds=$2
195 shift
196 ;;
197 --valgrind_osd )
198 [ -z "$2" ] && usage_exit
199 valgrind_osd=$2
200 shift
201 ;;
202 --valgrind_mon )
203 [ -z "$2" ] && usage_exit
204 valgrind_mon=$2
205 shift
206 ;;
207 --valgrind_mgr )
208 [ -z "$2" ] && usage_exit
209 valgrind_mgr=$2
210 shift
211 ;;
212 --valgrind_rgw )
213 [ -z "$2" ] && usage_exit
214 valgrind_rgw=$2
215 shift
216 ;;
217 --nodaemon )
218 nodaemon=1
219 ;;
220 --smallmds )
221 smallmds=1
222 ;;
223 --mon_num )
224 echo "mon_num:$2"
225 CEPH_NUM_MON="$2"
226 shift
227 ;;
228 --osd_num )
229 CEPH_NUM_OSD=$2
230 shift
231 ;;
232 --mds_num )
233 CEPH_NUM_MDS=$2
234 shift
235 ;;
236 --rgw_num )
237 CEPH_NUM_RGW=$2
238 shift
239 ;;
240 --mgr_num )
241 CEPH_NUM_MGR=$2
242 shift
243 ;;
244 --rgw_port )
245 CEPH_RGW_PORT=$2
246 shift
247 ;;
248 --rgw_frontend )
249 rgw_frontend=$2
250 shift
251 ;;
252 -m )
253 [ -z "$2" ] && usage_exit
254 MON_ADDR=$2
255 shift
256 ;;
257 -x )
258 cephx=1 # this is on be default, flag exists for historical consistency
259 ;;
260 -X )
261 cephx=0
262 ;;
263 -k )
264 if [ ! -r $conf_fn ]; then
265 echo "cannot use old configuration: $conf_fn not readable." >&2
266 exit
267 fi
268 overwrite_conf=0
269 ;;
270 --memstore )
271 memstore=1
272 ;;
273 -b | --bluestore )
274 bluestore=1
275 ;;
276 --hitset )
277 hitset="$hitset $2 $3"
278 shift
279 shift
280 ;;
281 -o )
282 extra_conf="$extra_conf $2
283"
284 shift
285 ;;
286 --cache )
287 if [ -z "$cache" ]; then
288 cache="$2"
289 else
290 cache="$cache $2"
291 fi
292 shift
293 ;;
294 --nolockdep )
295 lockdep=0
296 ;;
297 --multimds)
298 CEPH_MAX_MDS="$2"
299 shift
300 ;;
301 * )
302 usage_exit
303esac
304shift
305done
306
307if [ $kill_all -eq 1 ]; then
308 $SUDO $INIT_CEPH stop
309fi
310
311if [ "$overwrite_conf" -eq 0 ]; then
312 MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mon 2>/dev/null` && \
313 CEPH_NUM_MON="$MON"
314 OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_osd 2>/dev/null` && \
315 CEPH_NUM_OSD="$OSD"
316 MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mds 2>/dev/null` && \
317 CEPH_NUM_MDS="$MDS"
318 MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mgr 2>/dev/null` && \
319 CEPH_NUM_MGR="$MGR"
320 RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_rgw 2>/dev/null` && \
321 CEPH_NUM_RGW="$RGW"
322else
323 if [ "$new" -ne 0 ]; then
324 # only delete if -n
325 [ -e "$conf_fn" ] && rm -- "$conf_fn"
326 else
327 # -k is implied... (doesn't make sense otherwise)
328 overwrite_conf=0
329 fi
330fi
331
332ARGS="-c $conf_fn"
333
334prunb() {
335 echo "$* &"
336 "$@" &
337}
338
339prun() {
340 echo "$*"
341 "$@"
342}
343
344run() {
345 type=$1
346 shift
347 eval "valg=\$valgrind_$type"
348 [ -z "$valg" ] && valg="$valgrind"
349
350 if [ -n "$valg" ]; then
351 prunb valgrind --tool="$valg" $valgrind_args "$@" -f
352 sleep 1
353 else
354 if [ "$nodaemon" -eq 0 ]; then
355 prun "$@"
356 else
357 prunb ./ceph-run "$@" -f
358 fi
359 fi
360}
361
362wconf() {
363 if [ "$overwrite_conf" -eq 1 ]; then
364 cat >> "$conf_fn"
365 fi
366}
367
368prepare_conf() {
369 local DAEMONOPTS="
370 log file = $CEPH_OUT_DIR/\$name.log
371 admin socket = $CEPH_OUT_DIR/\$name.asok
372 chdir = \"\"
373 pid file = $CEPH_OUT_DIR/\$name.pid
374 heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
375"
376
377 wconf <<EOF
378; generated by vstart.sh on `date`
379[$VSTART_SEC]
380 num mon = $CEPH_NUM_MON
381 num osd = $CEPH_NUM_OSD
382 num mds = $CEPH_NUM_MDS
383 num mgr = $CEPH_NUM_MGR
384 num rgw = $CEPH_NUM_RGW
385
386[global]
387 fsid = $(uuidgen)
388 osd pg bits = 3
389 osd pgp bits = 5 ; (invalid, but ceph should cope!)
390 osd pool default size = $OSD_POOL_DEFAULT_SIZE
391 osd crush chooseleaf type = 0
392 osd pool default min size = 1
393 osd failsafe full ratio = .99
394 mon osd reporter subtree level = osd
395 mon osd full ratio = .99
396 mon data avail warn = 10
397 mon data avail crit = 1
398 erasure code dir = $EC_PATH
399 plugin dir = $CEPH_LIB
400 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
401 rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
402 filestore fd cache size = 32
403 run dir = $CEPH_OUT_DIR
404 enable experimental unrecoverable data corrupting features = *
405EOF
406 if [ "$lockdep" -eq 1 ] ; then
407 wconf <<EOF
408 lockdep = true
409EOF
410 fi
411 if [ "$cephx" -eq 1 ] ; then
412 wconf <<EOF
413 auth cluster required = cephx
414 auth service required = cephx
415 auth client required = cephx
416EOF
417 else
418 wconf <<EOF
419 auth cluster required = none
420 auth service required = none
421 auth client required = none
422EOF
423 fi
424 if [ "$short" -eq 1 ]; then
425 COSDSHORT=" osd max object name len = 460
426 osd max object namespace len = 64"
427 fi
428 wconf <<EOF
429[client]
430 keyring = $keyring_fn
431 log file = $CEPH_OUT_DIR/\$name.\$pid.log
432 admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok
433
434[mds]
435$DAEMONOPTS
436$CMDSDEBUG
437 mds debug frag = true
438 mds debug auth pins = true
439 mds debug subtrees = true
440 mds data = $CEPH_DEV_DIR/mds.\$id
441 mds root ino uid = `id -u`
442 mds root ino gid = `id -g`
443$extra_conf
444[mgr]
445 mgr modules = rest fsstatus
446 mgr data = $CEPH_DEV_DIR/mgr.\$id
447 mgr module path = $MGR_PYTHON_PATH
448 mon reweight min pgs per osd = 4
449$DAEMONOPTS
450$CMGRDEBUG
451$extra_conf
452[osd]
453$DAEMONOPTS
454 osd_check_max_object_name_len_on_startup = false
455 osd data = $CEPH_DEV_DIR/osd\$id
456 osd journal = $CEPH_DEV_DIR/osd\$id/journal
457 osd journal size = 100
458 osd class tmp = out
459 osd class dir = $OBJCLASS_PATH
460 osd class load list = *
461 osd class default list = *
462 osd scrub load threshold = 2000.0
463 osd debug op order = true
464 osd debug misdirected ops = true
465 filestore wbthrottle xfs ios start flusher = 10
466 filestore wbthrottle xfs ios hard limit = 20
467 filestore wbthrottle xfs inodes hard limit = 30
468 filestore wbthrottle btrfs ios start flusher = 10
469 filestore wbthrottle btrfs ios hard limit = 20
470 filestore wbthrottle btrfs inodes hard limit = 30
471 osd copyfrom max chunk = 524288
472 bluestore fsck on mount = true
473 bluestore block create = true
474 bluestore block db size = 67108864
475 bluestore block db create = true
476 bluestore block wal size = 1048576000
477 bluestore block wal create = true
478$COSDDEBUG
479$COSDMEMSTORE
480$COSDSHORT
481$extra_conf
482[mon]
483 mon pg warn min per osd = 3
484 mon osd allow primary affinity = true
485 mon osd allow pg upmap = true
486 mon reweight min pgs per osd = 4
487 mon osd prime pg temp = true
488 crushtool = $CEPH_BIN/crushtool
489 mon allow pool delete = true
490$DAEMONOPTS
491$CMONDEBUG
492$extra_conf
493 mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
494[global]
495$extra_conf
496EOF
497}
498
499start_mon() {
500 local MONS=""
501 local count=0
502 for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
503 do
504 [ $count -eq $CEPH_NUM_MON ] && break;
505 count=$(($count + 1))
506 if [ -z "$MONS" ];
507 then
508 MONS="$f"
509 else
510 MONS="$MONS $f"
511 fi
512 done
513
514 if [ "$new" -eq 1 ]; then
515 if [ `echo $IP | grep '^127\\.'` ]
516 then
517 echo
518 echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
519 echo " connect. either adjust /etc/hosts, or edit this script to use your"
520 echo " machine's real IP."
521 echo
522 fi
523
524 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
525 prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin --set-uid=0 \
526 --cap mon 'allow *' \
527 --cap osd 'allow *' \
528 --cap mds 'allow *' \
529 --cap mgr 'allow *' \
530 "$keyring_fn"
531
532 # build a fresh fs monmap, mon fs
533 local str=""
534 local count=0
535 for f in $MONS
536 do
537 str="$str --add $f $IP:$(($CEPH_PORT+$count))"
538 wconf <<EOF
539[mon.$f]
540 host = $HOSTNAME
541 mon data = $CEPH_DEV_DIR/mon.$f
542 mon addr = $IP:$(($CEPH_PORT+$count))
543EOF
544 count=$(($count + 1))
545 done
546 prun "$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"
547
548 for f in $MONS
549 do
550 prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
551 prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
552 prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
553 done
554
555 prun rm -- "$monmap_fn"
556 fi
557
558 # start monitors
559 for f in $MONS
560 do
561 run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
562 done
563}
564
565start_osd() {
566 for osd in `seq 0 $((CEPH_NUM_OSD-1))`
567 do
568 if [ "$new" -eq 1 ]; then
569 wconf <<EOF
570[osd.$osd]
571 host = $HOSTNAME
572EOF
573
574 rm -rf $CEPH_DEV_DIR/osd$osd || true
575 if command -v btrfs > /dev/null; then
576 for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
577 fi
578 mkdir -p $CEPH_DEV_DIR/osd$osd
579
580 local uuid=`uuidgen`
581 echo "add osd$osd $uuid"
582 ceph_adm osd create $uuid
583 ceph_adm osd crush add osd.$osd 1.0 host=$HOSTNAME root=default
584 $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --mkkey --osd-uuid $uuid
585
586 local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
587 echo adding osd$osd key to auth repository
588 ceph_adm -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow profile osd"
589 fi
590 echo start osd$osd
591 run 'osd' $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
592 done
593}
594
595start_mgr() {
596 local mgr=0
597 for name in x y z a b c d e f g h i j k l m n o p
598 do
599 [ $mgr -eq $CEPH_NUM_MGR ] && break
600 mgr=$(($mgr + 1))
601 if [ "$new" -eq 1 ]; then
602 mkdir -p $CEPH_DEV_DIR/mgr.$name
603 key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
604 $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
605 ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'
606 fi
607
608 wconf <<EOF
609[mgr.$name]
610 host = $HOSTNAME
611EOF
612
613 echo "Starting mgr.${name}"
614 run 'mgr' $CEPH_BIN/ceph-mgr -i $name $ARGS
615 done
616}
617
618start_mds() {
619 if [ $new -eq 1 ]; then
620 if [ "$CEPH_NUM_FS" -gt "0" ] ; then
621 if [ "$CEPH_NUM_FS" -gt "1" ] ; then
622 ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
623 fi
624
625 local fs=0
626 for name in a b c d e f g h i j k l m n o p
627 do
628 ceph_adm osd pool create "cephfs_data_${name}" 8
629 ceph_adm osd pool create "cephfs_metadata_${name}" 8
630 ceph_adm fs new "cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"
631 fs=$(($fs + 1))
632 [ $fs -eq $CEPH_NUM_FS ] && break
633 done
634 fi
635 fi
636
637 local mds=0
638 for name in a b c d e f g h i j k l m n o p
639 do
640 [ $mds -eq $CEPH_NUM_MDS ] && break
641 mds=$(($mds + 1))
642
643 if [ "$new" -eq 1 ]; then
644 prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
645 key_fn=$CEPH_DEV_DIR/mds.$name/keyring
646 wconf <<EOF
647[mds.$name]
648 host = $HOSTNAME
649EOF
650 if [ "$standby" -eq 1 ]; then
651 mkdir -p $CEPH_DEV_DIR/mds.${name}s
652 wconf <<EOF
653 mds standby for rank = $mds
654[mds.${name}s]
655 mds standby replay = true
656 mds standby for name = ${name}
657EOF
658 fi
659 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
660 ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
661 if [ "$standby" -eq 1 ]; then
662 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
663 "$CEPH_DEV_DIR/mds.${name}s/keyring"
664 ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
665 mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
666 fi
667 fi
668
669 run 'mds' $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
670 if [ "$standby" -eq 1 ]; then
671 run 'mds' $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
672 fi
673
674 #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
675 #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
676 #ceph_adm mds set max_mds 2
677 done
678}
679
680if [ "$debug" -eq 0 ]; then
681 CMONDEBUG='
682 debug mon = 10
683 debug ms = 1'
684 COSDDEBUG='
685 debug ms = 1'
686 CMDSDEBUG='
687 debug ms = 1'
688else
689 echo "** going verbose **"
690 CMONDEBUG='
691 debug mon = 20
692 debug paxos = 20
693 debug auth = 20
694 debug mgrc = 20
695 debug ms = 1'
696 COSDDEBUG='
697 debug ms = 1
698 debug osd = 25
699 debug objecter = 20
700 debug monc = 20
701 debug mgrc = 20
702 debug journal = 20
703 debug filestore = 20
704 debug bluestore = 30
705 debug bluefs = 20
706 debug rocksdb = 10
707 debug bdev = 20
708 debug rgw = 20
709 debug objclass = 20'
710 CMDSDEBUG='
711 debug ms = 1
712 debug mds = 20
713 debug auth = 20
714 debug monc = 20
715 debug mgrc = 20
716 mds debug scatterstat = true
717 mds verify scatter = true
718 mds log max segments = 2'
719 CMGRDEBUG='
720 debug ms = 1
721 debug monc = 20
722 debug mgr = 20'
723fi
724
725if [ -n "$MON_ADDR" ]; then
726 CMON_ARGS=" -m "$MON_ADDR
727 COSD_ARGS=" -m "$MON_ADDR
728 CMDS_ARGS=" -m "$MON_ADDR
729fi
730
731if [ "$memstore" -eq 1 ]; then
732 COSDMEMSTORE='
733 osd objectstore = memstore'
734fi
735if [ "$bluestore" -eq 1 ]; then
736 COSDMEMSTORE='
737 osd objectstore = bluestore'
738fi
739
740if [ -z "$CEPH_PORT" ]; then
741 CEPH_PORT=6789
742 [ -e ".ceph_port" ] && CEPH_PORT=`cat .ceph_port`
743fi
744
745[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
746
747# sudo if btrfs
748test -d $CEPH_DEV_DIR/osd0/. && test -e $CEPH_DEV_DIR/sudo && SUDO="sudo"
749
750prun $SUDO rm -f core*
751
752test -d $CEPH_OUT_DIR || mkdir $CEPH_OUT_DIR
753test -d $CEPH_DEV_DIR || mkdir $CEPH_DEV_DIR
754$SUDO rm -rf $CEPH_OUT_DIR/*
755test -d gmon && $SUDO rm -rf gmon/*
756
757[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && test -e $keyring_fn && rm $keyring_fn
758
759
760# figure machine's ip
761HOSTNAME=`hostname -s`
762if [ -n "$ip" ]; then
763 IP="$ip"
764else
765 echo hostname $HOSTNAME
766 if [ -x "$(which ip 2>/dev/null)" ]; then
767 IP_CMD="ip addr"
768 else
769 IP_CMD="ifconfig"
770 fi
771 # filter out IPv6 and localhost addresses
772 IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
773 # if nothing left, try using localhost address, it might work
774 if [ -z "$IP" ]; then IP="127.0.0.1"; fi
775fi
776echo "ip $IP"
777echo "port $CEPH_PORT"
778
779
780[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
781
782ceph_adm() {
783 if [ "$cephx" -eq 1 ]; then
784 prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
785 else
786 prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
787 fi
788}
789
790if [ "$new" -eq 1 ]; then
791 prepare_conf
792fi
793
794if [ $CEPH_NUM_MON -gt 0 ]; then
795 start_mon
796fi
797
798# osd
799if [ $CEPH_NUM_OSD -gt 0 ]; then
800 start_osd
801fi
802
803# mds
804if [ "$smallmds" -eq 1 ]; then
805 wconf <<EOF
806[mds]
807 mds log max segments = 2
808 mds cache size = 10000
809EOF
810fi
811
812if [ $CEPH_NUM_MDS -gt 0 ]; then
813 start_mds
814fi
815
816# Don't set max_mds until all the daemons are started, otherwise
817# the intended standbys might end up in active roles.
818if [ "$CEPH_MAX_MDS" -gt 1 ]; then
819 sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
820fi
821fs=0
822for name in a b c d e f g h i j k l m n o p
823do
824 [ $fs -eq $CEPH_NUM_FS ] && break
825 fs=$(($fs + 1))
826 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
827 ceph_adm fs set "cephfs_${name}" allow_multimds true --yes-i-really-mean-it
828 ceph_adm fs set "cephfs_${name}" max_mds "$CEPH_MAX_MDS"
829 fi
830done
831
832# mgr
833
834if [ $CEPH_NUM_MGR -gt 0 ]; then
835 start_mgr
836fi
837
838if [ "$ec" -eq 1 ]; then
839 ceph_adm <<EOF
840osd erasure-code-profile set ec-profile m=2 k=2
841osd pool create ec 8 8 erasure ec-profile
842EOF
843fi
844
845do_cache() {
846 while [ -n "$*" ]; do
847 p="$1"
848 shift
849 echo "creating cache for pool $p ..."
850 ceph_adm <<EOF
851osd pool create ${p}-cache 8
852osd tier add $p ${p}-cache
853osd tier cache-mode ${p}-cache writeback
854osd tier set-overlay $p ${p}-cache
855EOF
856 done
857}
858do_cache $cache
859
860do_hitsets() {
861 while [ -n "$*" ]; do
862 pool="$1"
863 type="$2"
864 shift
865 shift
866 echo "setting hit_set on pool $pool type $type ..."
867 ceph_adm <<EOF
868osd pool set $pool hit_set_type $type
869osd pool set $pool hit_set_count 8
870osd pool set $pool hit_set_period 30
871EOF
872 done
873}
874do_hitsets $hitset
875
876do_rgw()
877{
878 # Create S3 user
879 local akey='0555b35654ad1656d804'
880 local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
881 echo "setting up user testid"
882 $CEPH_BIN/radosgw-admin user create --uid testid --access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null
883
884 # Create S3-test users
885 # See: https://github.com/ceph/s3-tests
886 echo "setting up s3-test users"
887 $CEPH_BIN/radosgw-admin user create \
888 --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
889 --access-key ABCDEFGHIJKLMNOPQRST \
890 --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
891 --display-name youruseridhere \
892 --email s3@example.com -c $conf_fn > /dev/null
893 $CEPH_BIN/radosgw-admin user create \
894 --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
895 --access-key NOPQRSTUVWXYZABCDEFG \
896 --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
897 --display-name john.doe \
898 --email john.doe@example.com -c $conf_fn > /dev/null
899
900 # Create Swift user
901 echo "setting up user tester"
902 $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null
903
904 echo ""
905 echo "S3 User Info:"
906 echo " access key: $akey"
907 echo " secret key: $skey"
908 echo ""
909 echo "Swift User Info:"
910 echo " account : test"
911 echo " user : tester"
912 echo " password : testing"
913 echo ""
914
915 # Start server
916 echo start rgw on http://localhost:$CEPH_RGW_PORT
917 RGWDEBUG=""
918 if [ "$debug" -ne 0 ]; then
919 RGWDEBUG="--debug-rgw=20"
920 fi
921
922 RGWSUDO=
923 [ $CEPH_RGW_PORT -lt 1024 ] && RGWSUDO=sudo
924 n=$(($CEPH_NUM_RGW - 1))
925 for rgw in `seq 0 $n`; do
926 run 'rgw' $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn --log-file=${CEPH_OUT_DIR}/rgw.$rgw.log ${RGWDEBUG} --debug-ms=1
927 done
928}
929if [ "$CEPH_NUM_RGW" -gt 0 ]; then
930 do_rgw
931fi
932
933echo "started. stop.sh to stop. see out/* (e.g. 'tail -f out/????') for debug output."
934
935echo ""
936echo "export PYTHONPATH=./pybind:$PYTHONPATH"
937echo "export LD_LIBRARY_PATH=$CEPH_LIB"
938
939if [ "$CEPH_DIR" != "$PWD" ]; then
940 echo "export CEPH_CONF=$conf_fn"
941 echo "export CEPH_KEYRING=$keyring_fn"
942fi