#!/usr/bin/env bash
# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab

# abort on failure
set -e

quoted_print() {
    for s in "$@"; do
        if [[ "$s" =~ \ ]]; then
            printf -- "'%s' " "$s"
        else
            printf -- "$s "
        fi
    done
    printf '\n'
}

debug() {
    "$@" >&2
}

prunb() {
    debug quoted_print "$@" '&'
    "$@" &
}

prun() {
    debug quoted_print "$@"
    "$@"
}
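
# Examples (for illustration): `prun mkdir -p dev` echoes "mkdir -p dev" to
# stderr via quoted_print and then runs it; `prunb <cmd>` does the same but
# backgrounds the command. quoted_print re-quotes any argument containing a
# space so the echoed line can be copy-pasted back into a shell.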


if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`dirname $0`
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${PWD}/bin
    CEPH_LIB=${PWD}/lib

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/out
fi

get_cmake_variable() {
    local variable=$1
    grep "${variable}:" CMakeCache.txt | cut -d "=" -f 2
}
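
# Example (for illustration): when run from a build directory that contains
# CMakeCache.txt,
#   get_cmake_variable ceph_SOURCE_DIR
# prints the cached value of the ceph_SOURCE_DIR entry.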

# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    CEPH_BUILD_DIR=`pwd`
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_ROOT/external/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
    # make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
    [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/cephfs-shell
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_DIR/external/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
    [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
fi

if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi

[ -z "$PYBIND" ] && PYBIND=./pybind

[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH

export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
# Suppress the log message that indicates a development version is in use;
# vstart.sh is only used during testing and development.
export CEPH_DEV=1

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"

# if none of the CEPH_NUM_* values is specified, kill the existing cluster
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" -a \
     -z "$GANESHA_DAEMON_NUM" ]; then
    kill_all=1
else
    kill_all=0
fi

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR

if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
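
# For illustration: OSD=1 yields osd_pool_default_size=1 (no replication),
# while OSD=5 still caps the default pool size at 3 replicas.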

extra_conf=""
new=0
standby=0
debug=0
ip=""
nodaemon=0
redirect=0
smallmds=0
short=0
ec=0
cephadm=0
parallel=true
hitset=""
overwrite_conf=0
cephx=1 # turn cephx on by default
gssapi_authx=0
cache=""
if [ `uname` = FreeBSD ]; then
    objectstore="filestore"
else
    objectstore="bluestore"
fi
ceph_osd=ceph-osd
rgw_frontend="beast"
rgw_compression=""
lockdep=${LOCKDEP:-1}
spdk_enabled=0 # disable SPDK by default
zoned_enabled=0
io_uring_enabled=0
with_jaeger=0

with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
   [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi

filestore_path=
kstore_path=
bluestore_dev=

VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
inc_osd_num=0

msgr="21"

usage="usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
usage=$usage"\t-i <ip>: bind to specific ip\n"
usage=$usage"\t-n, --new\n"
usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage=$usage"\t--redirect-output: only useful with nodaemon, directs output to log file\n"
usage=$usage"\t--smallmds: reduce the mds cache memory limit\n"
usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
usage=$usage"\t-k keep old configuration files (default)\n"
usage=$usage"\t-x enable cephx (on by default)\n"
usage=$usage"\t-X disable cephx\n"
usage=$usage"\t-g --gssapi enable Kerberos/GSSAPI authentication\n"
usage=$usage"\t-G disable Kerberos/GSSAPI authentication\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e: create an erasure pool\n"
usage=$usage"\t-o config\t\tadd extra config parameters to all sections\n"
usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
usage=$usage"\t--rgw_compression specify the rgw compression plugin\n"
usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend (default)\n"
usage=$usage"\t-f, --filestore use filestore as the osd objectstore backend\n"
usage=$usage"\t-K, --kstore use kstore as the osd objectstore backend\n"
usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
usage=$usage"\t--nolockdep disable lockdep\n"
usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
usage=$usage"\t--without-dashboard: do not run the mgr dashboard module\n"
usage=$usage"\t--bluestore-spdk: enable SPDK with a comma-delimited list of PCI IDs of NVMe devices (e.g., 0000:81:00.0)\n"
usage=$usage"\t--msgr1: use msgr1 only\n"
usage=$usage"\t--msgr2: use msgr2 only\n"
usage=$usage"\t--msgr21: use msgr2 and msgr1\n"
usage=$usage"\t--crimson: use crimson-osd instead of ceph-osd\n"
usage=$usage"\t--osd-args: specify any extra osd specific options\n"
usage=$usage"\t--bluestore-devs: comma-separated list of blockdevs to use for bluestore\n"
usage=$usage"\t--bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)\n"
usage=$usage"\t--bluestore-io-uring: enable io_uring backend\n"
usage=$usage"\t--inc-osd: append more osds to an existing vstart cluster\n"
usage=$usage"\t--cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]\n"
usage=$usage"\t--no-parallel: don't start all OSDs in parallel\n"
usage=$usage"\t--jaeger: use jaegertracing for tracing\n"

usage_exit() {
    printf "$usage"
    exit
}
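
# Example invocation (for illustration), run from the build directory:
#   MON=1 OSD=1 MDS=1 MGR=1 ../src/vstart.sh -n -d -l
# starts a fresh single-host cluster bound to localhost with debug logging.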

while [ $# -ge 1 ]; do
    case $1 in
        -d | --debug)
            debug=1
            ;;
        -s | --standby_mds)
            standby=1
            ;;
        -l | --localhost)
            ip="127.0.0.1"
            ;;
        -i)
            [ -z "$2" ] && usage_exit
            ip="$2"
            shift
            ;;
        -e)
            ec=1
            ;;
        --new | -n)
            new=1
            ;;
        --inc-osd)
            new=0
            kill_all=0
            inc_osd_num=$2
            if [ "$inc_osd_num" == "" ]; then
                inc_osd_num=1
            else
                shift
            fi
            ;;
        --short)
            short=1
            ;;
        --crimson)
            ceph_osd=crimson-osd
            ;;
        --osd-args)
            extra_osd_args="$2"
            shift
            ;;
        --msgr1)
            msgr="1"
            ;;
        --msgr2)
            msgr="2"
            ;;
        --msgr21)
            msgr="21"
            ;;
        --cephadm)
            cephadm=1
            ;;
        --no-parallel)
            parallel=false
            ;;
        --valgrind)
            [ -z "$2" ] && usage_exit
            valgrind=$2
            shift
            ;;
        --valgrind_args)
            valgrind_args="$2"
            shift
            ;;
        --valgrind_mds)
            [ -z "$2" ] && usage_exit
            valgrind_mds=$2
            shift
            ;;
        --valgrind_osd)
            [ -z "$2" ] && usage_exit
            valgrind_osd=$2
            shift
            ;;
        --valgrind_mon)
            [ -z "$2" ] && usage_exit
            valgrind_mon=$2
            shift
            ;;
        --valgrind_mgr)
            [ -z "$2" ] && usage_exit
            valgrind_mgr=$2
            shift
            ;;
        --valgrind_rgw)
            [ -z "$2" ] && usage_exit
            valgrind_rgw=$2
            shift
            ;;
        --nodaemon)
            nodaemon=1
            ;;
        --redirect-output)
            redirect=1
            ;;
        --smallmds)
            smallmds=1
            ;;
        --rgw_port)
            CEPH_RGW_PORT=$2
            shift
            ;;
        --rgw_frontend)
            rgw_frontend=$2
            shift
            ;;
        --rgw_compression)
            rgw_compression=$2
            shift
            ;;
        --kstore_path)
            kstore_path=$2
            shift
            ;;
        --filestore_path)
            filestore_path=$2
            shift
            ;;
        -m)
            [ -z "$2" ] && usage_exit
            MON_ADDR=$2
            shift
            ;;
        -x)
            cephx=1 # this is on by default; the flag exists for historical consistency
            ;;
        -X)
            cephx=0
            ;;
        -g | --gssapi)
            gssapi_authx=1
            ;;
        -G)
            gssapi_authx=0
            ;;
        -k)
            if [ ! -r $conf_fn ]; then
                echo "cannot use old configuration: $conf_fn not readable." >&2
                exit
            fi
            new=0
            ;;
        --memstore)
            objectstore="memstore"
            ;;
        -b | --bluestore)
            objectstore="bluestore"
            ;;
        -f | --filestore)
            objectstore="filestore"
            ;;
        -K | --kstore)
            objectstore="kstore"
            ;;
        --hitset)
            hitset="$hitset $2 $3"
            shift
            shift
            ;;
        -o)
            extra_conf+=$'\n'"$2"
            shift
            ;;
        --cache)
            if [ -z "$cache" ]; then
                cache="$2"
            else
                cache="$cache $2"
            fi
            shift
            ;;
        --nolockdep)
            lockdep=0
            ;;
        --multimds)
            CEPH_MAX_MDS="$2"
            shift
            ;;
        --without-dashboard)
            with_mgr_dashboard=false
            ;;
        --bluestore-spdk)
            [ -z "$2" ] && usage_exit
            IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
            spdk_enabled=1
            shift
            ;;
        --bluestore-devs)
            IFS=',' read -r -a bluestore_dev <<< "$2"
            for dev in "${bluestore_dev[@]}"; do
                if [ ! -b $dev -o ! -w $dev ]; then
                    echo "All --bluestore-devs must refer to writable block devices"
                    exit 1
                fi
            done
            shift
            ;;
        --bluestore-zoned)
            zoned_enabled=1
            ;;
        --bluestore-io-uring)
            io_uring_enabled=1
            # this flag takes no argument; the loop's trailing shift suffices
            ;;
        --jaeger)
            with_jaeger=1
            echo "with_jaeger $with_jaeger"
            ;;
        *)
            usage_exit
    esac
    shift
done

if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi

if [ "$new" -eq 0 ]; then
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
    fi
    mkdir -p $CEPH_ASOK_DIR
    MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
        CEPH_NUM_MON="$MON"
    OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
        CEPH_NUM_OSD="$OSD"
    MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
        CEPH_NUM_MDS="$MDS"
    MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
        CEPH_NUM_MGR="$MGR"
    RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
        CEPH_NUM_RGW="$RGW"
    NFS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
        GANESHA_DAEMON_NUM="$NFS"
else
    # only delete if -n
    if [ -e "$conf_fn" ]; then
        asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
        rm -- "$conf_fn"
        if [ $asok_dir != /var/run/ceph ]; then
            [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
        fi
    fi
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
    fi
fi

ARGS="-c $conf_fn"

run() {
    type=$1
    shift
    num=$1
    shift
    eval "valg=\$valgrind_$type"
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    else
        if [ "$nodaemon" -eq 0 ]; then
            prun "$@"
        elif [ "$redirect" -eq 0 ]; then
            prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
        else
            ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
        fi
    fi
}
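
# Example (for illustration): `run 'mon' a $CEPH_BIN/ceph-mon -i a $ARGS`
# launches ceph-mon directly, under valgrind if --valgrind[_mon] was given,
# or wrapped by ceph-run (optionally with output redirected to
# $CEPH_OUT_DIR/mon.a.stdout) when --nodaemon is in effect.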

wconf() {
    if [ "$new" -eq 1 -o "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}
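
# Example (for illustration): wconf appends its stdin (usually a heredoc) to
# $conf_fn, but only for a fresh cluster (-n) or when overwrite_conf=1, e.g.:
#   wconf <<EOF
#   [client.example]   ; hypothetical section
#           log file = /tmp/example.log
#   EOF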


do_rgw_conf() {

    if [ $CEPH_NUM_RGW -eq 0 ]; then
        return 0
    fi

    # set up each rgw on a sequential port, starting at $CEPH_RGW_PORT;
    # each rgw's id is its port number.
    current_port=$CEPH_RGW_PORT
    for n in $(seq 1 $CEPH_NUM_RGW); do
        wconf << EOF
[client.rgw.${current_port}]
        rgw frontends = $rgw_frontend port=${current_port}
        admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
EOF
        current_port=$((current_port + 1))
    done

}

format_conf() {
    local opts=$1
    local indent="        "
    local opt
    local formatted
    while read -r opt; do
        if [ -z "$formatted" ]; then
            formatted="${opt}"
        else
            formatted+=$'\n'${indent}${opt}
        fi
    done <<< "$opts"
    echo "$formatted"
}
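
# Example (for illustration): format_conf $'a = 1\nb = 2' prints "a = 1"
# with "b = 2" on the next line indented by 8 spaces, so multi-line option
# strings line up under a config key inside the heredocs below.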

prepare_conf() {
    local DAEMONOPTS="
        log file = $CEPH_OUT_DIR/\$name.log
        admin socket = $CEPH_ASOK_DIR/\$name.asok
        chdir = \"\"
        pid file = $CEPH_OUT_DIR/\$name.pid
        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    local mgr_modules="restful iostat nfs"
    if $with_mgr_dashboard; then
        mgr_modules="dashboard $mgr_modules"
    fi

    local msgr_conf=''
    if [ $msgr -eq 21 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = true"
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = false"
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="ms bind msgr2 = false
                   ms bind msgr1 = true"
    fi

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
        num mon = $CEPH_NUM_MON
        num osd = $CEPH_NUM_OSD
        num mds = $CEPH_NUM_MDS
        num mgr = $CEPH_NUM_MGR
        num rgw = $CEPH_NUM_RGW
        num ganesha = $GANESHA_DAEMON_NUM

[global]
        fsid = $(uuidgen)
        osd failsafe full ratio = .99
        mon osd full ratio = .99
        mon osd nearfull ratio = .99
        mon osd backfillfull ratio = .99
        mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
        erasure code dir = $EC_PATH
        plugin dir = $CEPH_LIB
        filestore fd cache size = 32
        run dir = $CEPH_OUT_DIR
        crash dir = $CEPH_OUT_DIR
        enable experimental unrecoverable data corrupting features = *
        osd_crush_chooseleaf_type = 0
        debug asok assert abort = true
        $(format_conf "${msgr_conf}")
        $(format_conf "${extra_conf}")
EOF
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
        lockdep = true
EOF
    fi
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = cephx
        auth service required = cephx
        auth client required = cephx
EOF
    elif [ "$gssapi_authx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = gss
        auth service required = gss
        auth client required = gss
        gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
    else
        wconf <<EOF
        auth cluster required = none
        auth service required = none
        auth client required = none
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT="        osd max object name len = 460
        osd max object namespace len = 64"
    fi
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ]; then
            BLUESTORE_OPTS="        bluestore_block_db_path = \"\"
        bluestore_block_db_size = 0
        bluestore_block_db_create = false
        bluestore_block_wal_path = \"\"
        bluestore_block_wal_size = 0
        bluestore_block_wal_create = false
        bluestore_spdk_mem = 2048"
        else
            BLUESTORE_OPTS="        bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
        bluestore block db size = 1073741824
        bluestore block db create = true
        bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
        bluestore block wal size = 1048576000
        bluestore block wal create = true"
        fi
        if [ "$zoned_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bluestore min alloc size = 65536
        bluestore prefer deferred size = 0
        bluestore prefer deferred size hdd = 0
        bluestore prefer deferred size ssd = 0
        bluestore allocator = zoned"
        fi
        if [ "$io_uring_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bdev ioring = true"
        fi
    fi
    wconf <<EOF
[client]
        keyring = $keyring_fn
        log file = $CEPH_OUT_DIR/\$name.\$pid.log
        admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok

        ; needed for s3tests
        rgw crypt s3 kms backend = testing
        rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        rgw crypt require ssl = false
        ; uncomment the following to set LC days as the value in seconds;
        ; needed for passing lc time based s3-tests (can be verbose)
        ; rgw lc debug interval = 10
        $(format_conf "${extra_conf}")
EOF
    do_rgw_conf
    wconf << EOF
[mds]
$DAEMONOPTS
        mds data = $CEPH_DEV_DIR/mds.\$id
        mds root ino uid = `id -u`
        mds root ino gid = `id -g`
        $(format_conf "${extra_conf}")
[mgr]
        mgr data = $CEPH_DEV_DIR/mgr.\$id
        mgr module path = $MGR_PYTHON_PATH
        cephadm path = $CEPH_ROOT/src/cephadm/cephadm
$DAEMONOPTS
        $(format_conf "${extra_conf}")
[osd]
$DAEMONOPTS
        osd_check_max_object_name_len_on_startup = false
        osd data = $CEPH_DEV_DIR/osd\$id
        osd journal = $CEPH_DEV_DIR/osd\$id/journal
        osd journal size = 100
        osd class tmp = out
        osd class dir = $OBJCLASS_PATH
        osd class load list = *
        osd class default list = *
        osd fast shutdown = false

        filestore wbthrottle xfs ios start flusher = 10
        filestore wbthrottle xfs ios hard limit = 20
        filestore wbthrottle xfs inodes hard limit = 30
        filestore wbthrottle btrfs ios start flusher = 10
        filestore wbthrottle btrfs ios hard limit = 20
        filestore wbthrottle btrfs inodes hard limit = 30
        bluestore fsck on mount = true
        bluestore block create = true
$BLUESTORE_OPTS

        ; kstore
        kstore fsck on mount = true
        osd objectstore = $objectstore
$COSDSHORT
        $(format_conf "${extra_conf}")
[mon]
        mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
        $(format_conf "${extra_conf}")
        mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
        auth allow insecure global id reclaim = false
EOF
}
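
# For illustration: with defaults, a fresh run appends [$VSTART_SEC],
# [global], [client], per-rgw, [mds], [mgr], [osd] and [mon] sections to
# $conf_fn, with log/asok/pid paths pointing under $CEPH_OUT_DIR.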

write_logrotate_conf() {
    out_dir=$(pwd)"/out/*.log"

    cat << EOF
$out_dir
{
    rotate 5
    size 1G
    copytruncate
    compress
    notifempty
    missingok
    sharedscripts
    postrotate
        # NOTE: ensure that the absence of one of the following processes
        # does not abort the logrotate command.
        killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
    endscript
}
EOF
}

init_logrotate() {
    logrotate_conf_path=$(pwd)"/logrotate.conf"
    logrotate_state_path=$(pwd)"/logrotate.state"

    if ! test -e $logrotate_conf_path; then
        if test -e $logrotate_state_path; then
            rm -f $logrotate_state_path
        fi
        write_logrotate_conf > $logrotate_conf_path
    fi
}

start_mon() {
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ]; then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        if [ `echo $IP | grep '^127\.'` ]; then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo "  connect.  either adjust /etc/hosts, or edit this script to use your"
            echo "  machine's real IP."
            echo
        fi

        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
             --cap mon 'allow *' \
             --cap osd 'allow *' \
             --cap mds 'allow *' \
             --cap mgr 'allow *' \
             "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local params=()
        local count=0
        local mon_host=""
        for f in $MONS
        do
            if [ $msgr -eq 1 ]; then
                A="v1:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 2 ]; then
                A="v2:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 21 ]; then
                A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
            fi
            params+=("--addv" "$f" "$A")
            mon_host="$mon_host $A"
            wconf <<EOF
[mon.$f]
        host = $HOSTNAME
        mon data = $CEPH_DEV_DIR/mon.$f
EOF
            count=$(($count + 2))
        done
        wconf <<EOF
[global]
        mon host = $mon_host
EOF
        prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"

        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done
}
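
# For illustration: with the default --msgr21, mon.a is given
# [v2:$IP:$CEPH_PORT,v1:$IP:$CEPH_PORT+1], mon.b the next port pair, etc.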

start_osd() {
    if [ $inc_osd_num -gt 0 ]; then
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        start=$old_maxosd
        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf
    else
        start=0
        end=$(($CEPH_NUM_OSD-1))
    fi
    local osds_wait
    for osd in `seq $start $end`
    do
        local extra_seastar_args
        if [ "$ceph_osd" == "crimson-osd" ]; then
            # pin osd.$osd to a single CPU (cpuset $osd)
            extra_seastar_args="--smp 1 --cpuset $osd"
            if [ "$debug" -ne 0 ]; then
                extra_seastar_args+=" --debug"
            fi
        fi
        if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
            wconf <<EOF
[osd.$osd]
        host = $HOSTNAME
EOF
            if [ "$spdk_enabled" -eq 1 ]; then
                wconf <<EOF
        bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
EOF
            fi

            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$filestore_path" ]; then
                ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
            elif [ -n "$kstore_path" ]; then
                ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
                if [ -n "${bluestore_dev[$osd]}" ]; then
                    dd if=/dev/zero of=${bluestore_dev[$osd]} bs=1M count=1
                    ln -s ${bluestore_dev[$osd]} $CEPH_DEV_DIR/osd$osd/block
                    wconf <<EOF
        bluestore fsck on mount = false
EOF
                fi
            fi

            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            cat > $key_fn<<EOF
[osd.$osd]
        key = $OSD_SECRET
EOF
        fi
        echo start osd.$osd
        local osd_pid
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &
        osd_pid=$!
        if $parallel; then
            # collect every pid so the loop below waits for all OSDs
            osds_wait="$osds_wait $osd_pid"
        else
            wait $osd_pid
        fi
    done
    if $parallel; then
        for p in $osds_wait; do
            wait $p
        done
        debug echo OSDs started
    fi
    if [ $inc_osd_num -gt 0 ]; then
        # update num osd
        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
    fi
}
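
# For illustration: a running vstart cluster can be grown by re-running with
# `--inc-osd <n>`, which takes the start_osd path above and then exits.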

start_mgr() {
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

            wconf <<EOF
[mgr.$name]
        host = $HOSTNAME
EOF

            if $with_mgr_dashboard ; then
                local port_option="ssl_server_port"
                local http_proto="https"
                if [ "$ssl" == "0" ]; then
                    port_option="server_port"
                    http_proto="http"
                    ceph_adm config set mgr mgr/dashboard/ssl false --force
                fi
                ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
                if [ $mgr -eq 1 ]; then
                    DASH_URLS="$http_proto://$IP:$MGR_PORT"
                else
                    DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
                fi
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
            ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
            PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

            ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
        fi

        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            while ! ceph_adm -h | grep -c -q ^dashboard ; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
            printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
            ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
                administrator --force-password
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert; then
                    debug echo dashboard module not working correctly!
                fi
            fi
        fi

        while ! ceph_adm -h | grep -c -q ^restful ; do
            debug echo 'waiting for mgr restful module to start'
            sleep 1
        done
        if ceph_adm restful create-self-signed-cert; then
            SF=`mktemp`
            ceph_adm restful create-key admin -o $SF
            RESTFUL_SECRET=`cat $SF`
            rm $SF
        else
            debug echo MGR Restful is not working, perhaps the package is not installed?
        fi
    fi

    if [ "$cephadm" -eq 1 ]; then
        debug echo Enabling cephadm orchestrator
        if [ "$new" -eq 1 ]; then
            digest=$(curl -s \
                https://registry.hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
                | jq -r '.images[0].digest')
            ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
        fi
        ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
        ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
        ceph_adm mgr module enable cephadm
        ceph_adm orch set backend cephadm
        ceph_adm orch host add "$(hostname)"
        ceph_adm orch apply crash '*'
        ceph_adm config set mgr mgr/cephadm/allow_ptrace true
    fi
}

start_mds() {
    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
        host = $HOSTNAME
EOF
            if [ "$standby" -eq 1 ]; then
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
        mds standby for rank = $mds
[mds.${name}s]
        mds standby replay = true
        mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                     "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                     mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m  #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done

    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

            # wait for volume module to load
            while ! ceph_adm fs volume ls ; do sleep 1 ; done
            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                ceph_adm fs volume create ${name}
                ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi

}

# Ganesha daemons require the nfs-ganesha, nfs-ganesha-ceph,
# nfs-ganesha-rados-grace and nfs-ganesha-rados-urls (version 3.3 and above)
# packages to be installed. On Fedora>=31 these packages can be installed
# directly with 'dnf'. For CentOS>=8 the packages are available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha

start_ganesha() {
    cluster_id="vstart"
    GANESHA_PORT=$(($CEPH_PORT + 4000))
    local ganesha=0
    test_user="$cluster_id"
    pool_name=".nfs"
    namespace=$cluster_id
    url="rados://$pool_name/$namespace/conf-nfs.$test_user"

    prun ceph_adm auth get-or-create client.$test_user \
        mon "allow r" \
        osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
        mds "allow rw path=/" \
        >> "$keyring_fn"

    ceph_adm mgr module enable test_orchestrator
    ceph_adm orch set backend test_orchestrator
    ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
    prun ceph_adm nfs cluster create $cluster_id
    prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"

    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break

        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir

        echo "NFS_CORE_PARAM {
            Enable_NLM = false;
            Enable_RQUOTA = false;
            Protocols = 4;
            NFS_Port = $port;
        }

        MDCACHE {
            Dir_Chunk = 0;
        }

        NFSv4 {
            RecoveryBackend = rados_cluster;
            Minor_Versions = 1, 2;
        }

        RADOS_KV {
            pool = '$pool_name';
            namespace = $namespace;
            UserId = $test_user;
            nodeid = $name;
        }

        RADOS_URLS {
            Userid = $test_user;
            watch_url = '$url';
        }

        %url $url" > "$ganesha_dir/ganesha-$name.conf"
        wconf <<EOF
[ganesha.$name]
        host = $HOSTNAME
        ip = $IP
        port = $port
        ganesha data = $ganesha_dir
        pid file = $CEPH_OUT_DIR/ganesha-$name.pid
EOF

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG

        # wait a few seconds for the grace period to be removed
        sleep 2

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        echo "$test_user ganesha daemon $name started on port: $port"
    done
}

if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
        debug mon = 10
        debug ms = 1'
else
    debug echo "** going verbose **"
    CMONDEBUG='
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1'
fi

if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi

if [ -z "$CEPH_PORT" ]; then
    while true
    do
        CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
        ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
    done
fi

[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"

if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
fi

[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
if [ $inc_osd_num -eq 0 ]; then
    $SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*

[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn


# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # filter out IPv6 and localhost addresses
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing left, try using localhost address, it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"


[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph

ceph_adm() {
    if [ "$cephx" -eq 1 ]; then
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
    else
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
    fi
}
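
# Example (for illustration): `ceph_adm osd pool ls` runs the ceph CLI
# against this cluster's conf (and keyring, when cephx is enabled).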

if [ $inc_osd_num -gt 0 ]; then
    start_osd
    exit
fi

if [ "$new" -eq 1 ]; then
    prepare_conf
fi

if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon

    debug echo Populating config ...
    cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[global]
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1

[mon]
mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
mon_allow_pool_size_one = true

[osd]
osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288

[mds]
mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true

[mgr]
mgr/telemetry/nag = false
mgr/telemetry/enable = false

EOF

    if [ "$debug" -ne 0 ]; then
        debug echo Setting debug configs ...
        cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[mgr]
debug_ms = 1
debug_mgr = 20
debug_monc = 20
debug_mon = 20

[osd]
debug_ms = 1
debug_osd = 25
debug_objecter = 20
debug_monc = 20
debug_mgrc = 20
debug_journal = 20
debug_filestore = 20
debug_bluestore = 20
debug_bluefs = 20
debug_rocksdb = 20
debug_bdev = 20
debug_reserver = 10
debug_objclass = 20

[mds]
debug_ms = 1
debug_mds = 20
debug_monc = 20
debug_mgrc = 20
mds_debug_scatterstat = true
mds_verify_scatter = true
EOF
    fi
    if [ "$cephadm" -gt 0 ]; then
        debug echo Setting mon public_network ...
        public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
        ceph_adm config set mon public_network $public_network
    fi
fi

if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi

# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi

# mds
if [ "$smallmds" -eq 1 ]; then
    wconf <<EOF
[mds]
        mds log max segments = 2
        # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
        mds cache memory limit = 100M
EOF
fi

if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
    # key with access to all FS
    ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
fi

# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
    fi
done

if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec erasure ec-profile
EOF
fi

do_cache() {
    while [ -n "$*" ]; do
        p="$1"
        shift
        debug echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache

do_hitsets() {
    while [ -n "$*" ]; do
        pool="$1"
        type="$2"
        shift
        shift
        debug echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset
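
# For illustration: `--hitset rbd bloom` (with a hypothetical pool named
# rbd) makes do_hitsets run `osd pool set rbd hit_set_type bloom` plus the
# count/period settings above.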

do_rgw_create_bucket()
{
    # Create RGW Bucket
    local rgw_python_file='rgw-create-bucket.py'
    echo "import boto
import boto.s3.connection

conn = boto.connect_s3(
        aws_access_key_id = '$s3_akey',
        aws_secret_access_key = '$s3_skey',
        host = '$HOSTNAME',
        port = 80,
        is_secure=False,
        calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )

bucket = conn.create_bucket('nfs-bucket')
print('created new bucket')" > "$CEPH_OUT_DIR/$rgw_python_file"
    prun python $CEPH_OUT_DIR/$rgw_python_file
}
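
# Note: the generated script uses the legacy boto (v2) S3 API and assumes a
# `python` interpreter with boto installed is on the PATH.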

do_rgw_create_users()
{
    # Create S3 user
    s3_akey='0555b35654ad1656d804'
    s3_skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $s3_akey --secret $s3_skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo "  access key:  $s3_akey"
    echo "  secret key:  $s3_skey"
    echo ""
    echo "Swift User Info:"
    echo "  account   : test"
    echo "  user      : tester"
    echo "  password  : testing"
    echo ""
}

do_rgw()
{
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            debug echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi
    # Start server
    if [ "$cephadm" -gt 0 ]; then
        ceph_adm orch apply rgw rgwTest
        return
    fi

    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    fi

    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
    RGWSUDO=
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo

    local i=0 # rgw instance counter
    current_port=$CEPH_RGW_PORT
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"

        ceph_adm auth get-or-create $rgw_name \
            mon 'allow rw' \
            osd 'allow rwx' \
            mgr 'allow rw' \
            >> "$keyring_fn"

        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            --rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
            ${RGWDEBUG} \
            -n ${rgw_name} \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}"

        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break

        current_port=$((current_port+1))
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi

# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    pseudo_path="/cephfs"
    if [ "$cephadm" -gt 0 ]; then
        cluster_id="vstart"
        port="2049"
        prun ceph_adm nfs cluster create $cluster_id
        if [ $CEPH_NUM_MDS -gt 0 ]; then
            prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
        if [ "$CEPH_NUM_RGW" -gt 0 ]; then
            pseudo_path="/rgw"
            do_rgw_create_bucket
            prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
    else
        start_ganesha
        echo "Mount using: mount -t nfs -o port=<ganesha-port-num> $IP:$pseudo_path mountpoint"
    fi
fi

docker_service(){
    local service=''
    # prefer podman
    if pgrep -f podman > /dev/null; then
        service="podman"
    elif pgrep -f docker > /dev/null; then
        service="docker"
    fi
    if [ -n "$service" ]; then
        echo "using $service for deploying jaeger..."
        # check for an exited jaeger container, remove it, and restart it
        if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then
            $service rm jaeger
        fi
        if [ ! "$($service ps -aq -f name=jaeger)" ]; then
            $service "$@"
        fi
    else
        echo "cannot find docker or podman, please restart service and rerun."
    fi
}

echo ""
if [ $with_jaeger -eq 1 ]; then
    debug echo "Enabling jaegertracing..."
    docker_service run -d --name jaeger \
        -p 5775:5775/udp \
        -p 6831:6831/udp \
        -p 6832:6832/udp \
        -p 5778:5778 \
        -p 16686:16686 \
        -p 14268:14268 \
        -p 14250:14250 \
        jaegertracing/all-in-one:1.20
fi


debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."

echo ""
if [ "$new" -eq 1 ]; then
    if $with_mgr_dashboard; then
        echo "dashboard urls: $DASH_URLS"
        echo "  w/ user/pass: admin / admin"
    fi
    echo "restful urls: $RESTFUL_URLS"
    echo "  w/ user/pass: admin / $RESTFUL_SECRET"
    echo ""
fi
echo ""
# add header to the environment file
{
    echo "#"
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
    echo "#"
} > $CEPH_DIR/vstart_environment.sh
{
    echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
    echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
    echo "export PATH=$CEPH_DIR/bin:\$PATH"

    if [ "$CEPH_DIR" != "$PWD" ]; then
        echo "export CEPH_CONF=$conf_fn"
        echo "export CEPH_KEYRING=$keyring_fn"
    fi

    if [ -n "$CEPHFS_SHELL" ]; then
        echo "alias cephfs-shell=$CEPHFS_SHELL"
    fi
} | tee -a $CEPH_DIR/vstart_environment.sh

echo "CEPH_DEV=1"

# always keep this section at the very bottom of this file
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
    echo ""
    echo ""
    echo "WARNING:"
    echo "    Please remove stray $STRAY_CONF_PATH if not needed."
    echo "    Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "    and may lead to undesired results."
    echo ""
    echo "NOTE:"
    echo "    Remember to restart cluster after removing $STRAY_CONF_PATH"
fi

init_logrotate