#!/usr/bin/env bash
# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab

# abort on failure
set -e

quoted_print() {
    for s in "$@"; do
        if [[ "$s" =~ \  ]]; then
            printf -- "'%s' " "$s"
        else
            printf -- "$s "
        fi
    done
    printf '\n'
}

debug() {
    "$@" >&2
}

prunb() {
    debug quoted_print "$@" '&'
    "$@" &
}

prun() {
    debug quoted_print "$@"
    "$@"
}
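
# The helpers above are used throughout this script: quoted_print echoes a
# command with quoting preserved, debug sends output to stderr, and prun/prunb
# run a command in the foreground/background after echoing it, e.g.:
#   prun mkdir -p "$CEPH_DEV_DIR"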


if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`dirname $0`
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${PWD}/bin
    CEPH_LIB=${PWD}/lib

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/out
fi

get_cmake_variable() {
    local variable=$1
    grep "${variable}:" CMakeCache.txt | cut -d "=" -f 2
}
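# e.g. `get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND` prints "ON" or "OFF",
# matching the value recorded by cmake in CMakeCache.txt.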

# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    CEPH_BUILD_DIR=`pwd`
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
    # make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
    [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/cephfs-shell
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
    [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
fi

if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi

[ -z "$PYBIND" ] && PYBIND=./pybind

[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH

export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
# Suppress the logging that indicates we are running a development version;
# vstart.sh is only used during testing and development.
export CEPH_DEV=1

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$GANESHA"

# if none of the CEPH_NUM_* numbers is specified, kill the existing
# cluster.
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" -a \
     -z "$GANESHA_DAEMON_NUM" ]; then
    kill_all=1
else
    kill_all=0
fi

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR

if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi

extra_conf=""
new=0
standby=0
debug=0
ip=""
nodaemon=0
redirect=0
smallmds=0
short=0
ec=0
cephadm=0
parallel=true
hitset=""
overwrite_conf=0
cephx=1 #turn cephx on by default
gssapi_authx=0
cache=""
if [ `uname` = FreeBSD ]; then
    objectstore="filestore"
else
    objectstore="bluestore"
fi
ceph_osd=ceph-osd
rgw_frontend="beast"
rgw_compression=""
lockdep=${LOCKDEP:-1}
spdk_enabled=0 #disable SPDK by default
pci_id=""

with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
   [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi

filestore_path=
kstore_path=
bluestore_dev=

VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
inc_osd_num=0

msgr="21"

usage="usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 GANESHA=1 $0 -n -d\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
usage=$usage"\t-i <ip>: bind to specific ip\n"
usage=$usage"\t-n, --new\n"
usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage=$usage"\t--redirect-output: only useful with nodaemon, directs output to log file\n"
usage=$usage"\t--smallmds: limit mds cache memory limit\n"
usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
usage=$usage"\t-k keep old configuration files (default)\n"
usage=$usage"\t-x enable cephx (on by default)\n"
usage=$usage"\t-X disable cephx\n"
usage=$usage"\t-g --gssapi enable Kerberos/GSSApi authentication\n"
usage=$usage"\t-G disable Kerberos/GSSApi authentication\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e : create an erasure pool\n";
usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
usage=$usage"\t--rgw_compression specify the rgw compression plugin\n"
usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend (default)\n"
usage=$usage"\t-f, --filestore use filestore as the osd objectstore backend\n"
usage=$usage"\t-K, --kstore use kstore as the osd objectstore backend\n"
usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
usage=$usage"\t--nolockdep disable lockdep\n"
usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
usage=$usage"\t--without-dashboard: do not run using mgr dashboard\n"
usage=$usage"\t--bluestore-spdk <vendor>:<device>: enable SPDK and specify the PCI-ID of the NVME device\n"
usage=$usage"\t--msgr1: use msgr1 only\n"
usage=$usage"\t--msgr2: use msgr2 only\n"
usage=$usage"\t--msgr21: use msgr2 and msgr1\n"
usage=$usage"\t--crimson: use crimson-osd instead of ceph-osd\n"
usage=$usage"\t--osd-args: specify any extra osd specific options\n"
usage=$usage"\t--bluestore-devs: comma-separated list of blockdevs to use for bluestore\n"
usage=$usage"\t--inc-osd: append some more osds into existing vcluster\n"
usage=$usage"\t--cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]\n"
usage=$usage"\t--no-parallel: don't start all OSDs in parallel\n"

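# Typical invocations (cf. the usage text above):
#   MON=1 OSD=3 MDS=1 vstart.sh -n -d -x    # fresh cluster with debug logging
#   vstart.sh -k                            # reuse the existing ceph.conf/keyring
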
usage_exit() {
    printf "$usage"
    exit
}

while [ $# -ge 1 ]; do
case $1 in
    -d | --debug )
        debug=1
        ;;
    -s | --standby_mds)
        standby=1
        ;;
    -l | --localhost )
        ip="127.0.0.1"
        ;;
    -i )
        [ -z "$2" ] && usage_exit
        ip="$2"
        shift
        ;;
    -e )
        ec=1
        ;;
    --new | -n )
        new=1
        ;;
    --inc-osd )
        new=0
        kill_all=0
        inc_osd_num=$2
        if [ "$inc_osd_num" == "" ]; then
            inc_osd_num=1
        else
            shift
        fi
        ;;
    --short )
        short=1
        ;;
    --crimson )
        ceph_osd=crimson-osd
        ;;
    --osd-args )
        extra_osd_args="$2"
        shift
        ;;
    --msgr1 )
        msgr="1"
        ;;
    --msgr2 )
        msgr="2"
        ;;
    --msgr21 )
        msgr="21"
        ;;
    --cephadm )
        cephadm=1
        ;;
    --no-parallel )
        parallel=false
        ;;
    --valgrind )
        [ -z "$2" ] && usage_exit
        valgrind=$2
        shift
        ;;
    --valgrind_args )
        valgrind_args="$2"
        shift
        ;;
    --valgrind_mds )
        [ -z "$2" ] && usage_exit
        valgrind_mds=$2
        shift
        ;;
    --valgrind_osd )
        [ -z "$2" ] && usage_exit
        valgrind_osd=$2
        shift
        ;;
    --valgrind_mon )
        [ -z "$2" ] && usage_exit
        valgrind_mon=$2
        shift
        ;;
    --valgrind_mgr )
        [ -z "$2" ] && usage_exit
        valgrind_mgr=$2
        shift
        ;;
    --valgrind_rgw )
        [ -z "$2" ] && usage_exit
        valgrind_rgw=$2
        shift
        ;;
    --nodaemon )
        nodaemon=1
        ;;
    --redirect-output)
        redirect=1
        ;;
    --smallmds )
        smallmds=1
        ;;
    --rgw_port )
        CEPH_RGW_PORT=$2
        shift
        ;;
    --rgw_frontend )
        rgw_frontend=$2
        shift
        ;;
    --rgw_compression )
        rgw_compression=$2
        shift
        ;;
    --kstore_path )
        kstore_path=$2
        shift
        ;;
    --filestore_path )
        filestore_path=$2
        shift
        ;;
    -m )
        [ -z "$2" ] && usage_exit
        MON_ADDR=$2
        shift
        ;;
    -x )
        cephx=1 # this is on by default, flag exists for historical consistency
        ;;
    -X )
        cephx=0
        ;;

    -g | --gssapi)
        gssapi_authx=1
        ;;
    -G)
        gssapi_authx=0
        ;;

    -k )
        if [ ! -r $conf_fn ]; then
            echo "cannot use old configuration: $conf_fn not readable." >&2
            exit
        fi
        new=0
        ;;
    --memstore )
        objectstore="memstore"
        ;;
    -b | --bluestore )
        objectstore="bluestore"
        ;;
    -f | --filestore )
        objectstore="filestore"
        ;;
    -K | --kstore )
        objectstore="kstore"
        ;;
    --hitset )
        hitset="$hitset $2 $3"
        shift
        shift
        ;;
    -o )
        extra_conf="$extra_conf $2
"
        shift
        ;;
    --cache )
        if [ -z "$cache" ]; then
            cache="$2"
        else
            cache="$cache $2"
        fi
        shift
        ;;
    --nolockdep )
        lockdep=0
        ;;
    --multimds)
        CEPH_MAX_MDS="$2"
        shift
        ;;
    --without-dashboard)
        with_mgr_dashboard=false
        ;;
    --bluestore-spdk )
        [ -z "$2" ] && usage_exit
        pci_id="$2"
        spdk_enabled=1
        shift
        ;;
    --bluestore-devs )
        IFS=',' read -r -a bluestore_dev <<< "$2"
        for dev in "${bluestore_dev[@]}"; do
            if [ ! -b $dev -o ! -w $dev ]; then
                echo "All --bluestore-devs must refer to writable block devices"
                exit 1
            fi
        done
        shift
        ;;
    * )
        usage_exit
esac
shift
done

if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi

if [ "$new" -eq 0 ]; then
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
    fi
    mkdir -p $CEPH_ASOK_DIR
    MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
        CEPH_NUM_MON="$MON"
    OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
        CEPH_NUM_OSD="$OSD"
    MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
        CEPH_NUM_MDS="$MDS"
    MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
        CEPH_NUM_MGR="$MGR"
    RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
        CEPH_NUM_RGW="$RGW"
    GANESHA=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
        GANESHA_DAEMON_NUM="$GANESHA"
else
    # only delete if -n
    if [ -e "$conf_fn" ]; then
        asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
        rm -- "$conf_fn"
        if [ $asok_dir != /var/run/ceph ]; then
            [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
        fi
    fi
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
    fi
fi

ARGS="-c $conf_fn"

run() {
    type=$1
    shift
    num=$1
    shift
    eval "valg=\$valgrind_$type"
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    else
        if [ "$nodaemon" -eq 0 ]; then
            prun "$@"
        elif [ "$redirect" -eq 0 ]; then
            prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
        else
            ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
        fi
    fi
}
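
# run() dispatches daemon start-up: under valgrind when a --valgrind* option
# was given, via ceph-run in the foreground for --nodaemon (optionally sending
# output to a file with --redirect-output), otherwise as a regular daemon.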

wconf() {
    if [ "$new" -eq 1 -o "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}
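
# wconf appends its stdin to $conf_fn, but only when a new config is being
# generated (-n) or when overwrite_conf=1 (set by --inc-osd); with -k the
# existing ceph.conf is left untouched.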

get_pci_selector() {
    which_pci=$1
    lspci -mm -n -D -d $pci_id | cut -d ' ' -f 1 | sed -n $which_pci'p'
}

get_pci_selector_num() {
    lspci -mm -n -D -d $pci_id | cut -d' ' -f 1 | wc -l
}
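
# Both helpers use `lspci -d $pci_id` to enumerate the NVMe devices matching
# the <vendor>:<device> ID passed via --bluestore-spdk; get_pci_selector N
# prints the Nth matching PCI address, used below as a spdk: block path.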

do_rgw_conf() {

    if [ $CEPH_NUM_RGW -eq 0 ]; then
        return 0
    fi

    # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
    # individual rgw's ids will be their ports.
    current_port=$CEPH_RGW_PORT
    for n in $(seq 1 $CEPH_NUM_RGW); do
        wconf << EOF
[client.rgw.${current_port}]
    rgw frontends = $rgw_frontend port=${current_port}
    admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
EOF
        current_port=$((current_port + 1))
    done

}
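
# With the default CEPH_RGW_PORT=8000 and two radosgw daemons this emits
# [client.rgw.8000] and [client.rgw.8001] sections, one port per daemon.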

prepare_conf() {
    local DAEMONOPTS="
        log file = $CEPH_OUT_DIR/\$name.log
        admin socket = $CEPH_ASOK_DIR/\$name.asok
        chdir = \"\"
        pid file = $CEPH_OUT_DIR/\$name.pid
        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    local mgr_modules="restful iostat"
    if $with_mgr_dashboard; then
        mgr_modules="dashboard $mgr_modules"
    fi

    local msgr_conf=''
    if [ $msgr -eq 21 ]; then
        msgr_conf="
        ms bind msgr2 = true
        ms bind msgr1 = true
";
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="
        ms bind msgr2 = true
        ms bind msgr1 = false
";
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="
        ms bind msgr2 = false
        ms bind msgr1 = true
";
    fi

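    # $msgr_conf lands in [global] below; the default --msgr21 mode binds both
    # the msgr2 and the legacy msgr1 protocol for every daemon.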
    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
        num mon = $CEPH_NUM_MON
        num osd = $CEPH_NUM_OSD
        num mds = $CEPH_NUM_MDS
        num mgr = $CEPH_NUM_MGR
        num rgw = $CEPH_NUM_RGW
        num ganesha = $GANESHA_DAEMON_NUM

[global]
        fsid = $(uuidgen)
        osd failsafe full ratio = .99
        mon osd full ratio = .99
        mon osd nearfull ratio = .99
        mon osd backfillfull ratio = .99
        mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
        erasure code dir = $EC_PATH
        plugin dir = $CEPH_LIB
        filestore fd cache size = 32
        run dir = $CEPH_OUT_DIR
        crash dir = $CEPH_OUT_DIR
        enable experimental unrecoverable data corrupting features = *
        osd_crush_chooseleaf_type = 0
        debug asok assert abort = true
$msgr_conf
$extra_conf
EOF
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
        lockdep = true
EOF
    fi
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = cephx
        auth service required = cephx
        auth client required = cephx
EOF
    elif [ "$gssapi_authx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = gss
        auth service required = gss
        auth client required = gss
        gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
    else
        wconf <<EOF
        auth cluster required = none
        auth service required = none
        auth client required = none
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT="        osd max object name len = 460
        osd max object namespace len = 64"
    fi
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ]; then
            if [ "$(get_pci_selector_num)" -eq 0 ]; then
                echo "Could not find the specified NVMe device, please check." >&2
                exit
            fi
            if [ $(get_pci_selector_num) -lt $CEPH_NUM_OSD ]; then
                echo "OSD number ($CEPH_NUM_OSD) is greater than NVMe SSD number ($(get_pci_selector_num)), please check." >&2
                exit
            fi
            BLUESTORE_OPTS="        bluestore_block_db_path = \"\"
        bluestore_block_db_size = 0
        bluestore_block_db_create = false
        bluestore_block_wal_path = \"\"
        bluestore_block_wal_size = 0
        bluestore_block_wal_create = false
        bluestore_spdk_mem = 2048"
        else
            BLUESTORE_OPTS="        bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
        bluestore block db size = 1073741824
        bluestore block db create = true
        bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
        bluestore block wal size = 1048576000
        bluestore block wal create = true"
        fi
    fi
    wconf <<EOF
[client]
        keyring = $keyring_fn
        log file = $CEPH_OUT_DIR/\$name.\$pid.log
        admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok

        ; needed for s3tests
        rgw crypt s3 kms backend = testing
        rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        rgw crypt require ssl = false
        ; uncomment the following to set LC days as the value in seconds;
        ; needed for passing lc time based s3-tests (can be verbose)
        ; rgw lc debug interval = 10

$extra_conf
EOF
wconf <<EOF
[cephfs-shell]
        debug shell = true

$extra_conf
EOF

    do_rgw_conf
    wconf << EOF
[mds]
$DAEMONOPTS
        mds data = $CEPH_DEV_DIR/mds.\$id
        mds root ino uid = `id -u`
        mds root ino gid = `id -g`
$extra_conf
[mgr]
        mgr data = $CEPH_DEV_DIR/mgr.\$id
        mgr module path = $MGR_PYTHON_PATH
        cephadm path = $CEPH_ROOT/src/cephadm/cephadm
$DAEMONOPTS
$extra_conf
[osd]
$DAEMONOPTS
        osd_check_max_object_name_len_on_startup = false
        osd data = $CEPH_DEV_DIR/osd\$id
        osd journal = $CEPH_DEV_DIR/osd\$id/journal
        osd journal size = 100
        osd class tmp = out
        osd class dir = $OBJCLASS_PATH
        osd class load list = *
        osd class default list = *
        osd fast shutdown = false

        filestore wbthrottle xfs ios start flusher = 10
        filestore wbthrottle xfs ios hard limit = 20
        filestore wbthrottle xfs inodes hard limit = 30
        filestore wbthrottle btrfs ios start flusher = 10
        filestore wbthrottle btrfs ios hard limit = 20
        filestore wbthrottle btrfs inodes hard limit = 30
        bluestore fsck on mount = true
        bluestore block create = true
$BLUESTORE_OPTS

        ; kstore
        kstore fsck on mount = true
        osd objectstore = $objectstore
$COSDSHORT
$extra_conf
[mon]
        mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
$extra_conf
        mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
EOF
}

start_mon() {
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ]; then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        if [ `echo $IP | grep '^127\\.'` ]; then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo "  connect.  either adjust /etc/hosts, or edit this script to use your"
            echo "  machine's real IP."
            echo
        fi

        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local params=()
        local count=0
        local mon_host=""
        for f in $MONS
        do
            if [ $msgr -eq 1 ]; then
                A="v1:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 2 ]; then
                A="v2:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 21 ]; then
                A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
            fi
            params+=("--addv" "$f" "$A")
            mon_host="$mon_host $A"
            wconf <<EOF
[mon.$f]
        host = $HOSTNAME
        mon data = $CEPH_DEV_DIR/mon.$f
EOF
            count=$(($count + 2))
        done
        wconf <<EOF
[global]
        mon host = $mon_host
EOF
        prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"
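
        # The monmap now has one --addv entry per monitor; each mon's data
        # directory is created from scratch below with ceph-mon --mkfs using
        # this monmap and the freshly generated keyring.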

        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done
}

start_osd() {
    if [ $inc_osd_num -gt 0 ]; then
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        start=$old_maxosd
        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf
    else
        start=0
        end=$(($CEPH_NUM_OSD-1))
    fi
    local osds_wait
    for osd in `seq $start $end`
    do
        local extra_seastar_args
        if [ "$ceph_osd" == "crimson-osd" ]; then
            # designate a single CPU node $osd for osd.$osd
            extra_seastar_args="--smp 1 --cpuset $osd"
            if [ "$debug" -ne 0 ]; then
                extra_seastar_args+=" --debug"
            fi
        fi
        if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
            wconf <<EOF
[osd.$osd]
        host = $HOSTNAME
EOF
            if [ "$spdk_enabled" -eq 1 ]; then
                wconf <<EOF
        bluestore_block_path = spdk:$(get_pci_selector $((osd+1)))
EOF
            fi

            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$filestore_path" ]; then
                ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
            elif [ -n "$kstore_path" ]; then
                ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
                if [ -n "${bluestore_dev[$osd]}" ]; then
                    dd if=/dev/zero of=${bluestore_dev[$osd]} bs=1M count=1
                    ln -s ${bluestore_dev[$osd]} $CEPH_DEV_DIR/osd$osd/block
                    wconf <<EOF
        bluestore fsck on mount = false
EOF
                fi
            fi

            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            cat > $key_fn<<EOF
[osd.$osd]
        key = $OSD_SECRET
EOF
        fi
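
        # At this point (for a new cluster) the OSD has been registered with
        # `ceph osd new`, its data dir mkfs'ed, and its cephx key written to
        # $CEPH_DEV_DIR/osd$osd/keyring; now start the daemon itself.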
        echo start osd.$osd
        local osd_pid
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &
        osd_pid=$!
        if $parallel; then
            osds_wait="$osds_wait $osd_pid"
        else
            wait $osd_pid
        fi
    done
    if $parallel; then
        for p in $osds_wait; do
            wait $p
        done
        debug echo OSDs started
    fi
    if [ $inc_osd_num -gt 0 ]; then
        # update num osd
        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
    fi
}

start_mgr() {
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

            wconf <<EOF
[mgr.$name]
        host = $HOSTNAME
EOF

            if $with_mgr_dashboard ; then
                local port_option="ssl_server_port"
                local http_proto="https"
                if [ "$ssl" == "0" ]; then
                    port_option="server_port"
                    http_proto="http"
                    ceph_adm config set mgr mgr/dashboard/ssl false --force
                fi
                ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
                if [ $mgr -eq 1 ]; then
                    DASH_URLS="$http_proto://$IP:$MGR_PORT"
                else
                    DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
                fi
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
            ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
            PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

            ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
        fi
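
        # Port layout per mgr: the dashboard (when built) listens on MGR_PORT
        # (CEPH_PORT+1000, +3000, ...), the restful module on the next +1000,
        # and prometheus on 9283, 10283, ...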

        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            while ! ceph_adm -h | grep -c -q ^dashboard ; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            ceph_adm dashboard ac-user-create --force-password admin admin administrator
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert; then
                    debug echo dashboard module not working correctly!
                fi
            fi
        fi

        while ! ceph_adm -h | grep -c -q ^restful ; do
            debug echo 'waiting for mgr restful module to start'
            sleep 1
        done
        if ceph_adm restful create-self-signed-cert; then
            SF=`mktemp`
            ceph_adm restful create-key admin -o $SF
            RESTFUL_SECRET=`cat $SF`
            rm $SF
        else
            debug echo MGR Restful is not working, perhaps the package is not installed?
        fi
    fi

    if [ "$cephadm" -eq 1 ]; then
        debug echo Enabling cephadm orchestrator
        ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
        ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
        ceph_adm mgr module enable cephadm
        ceph_adm orch set backend cephadm
        ceph_adm orch host add $HOSTNAME
        ceph_adm orch apply crash '*'
        ceph_adm config set mgr mgr/cephadm/allow_ptrace true
    fi
}

start_mds() {
    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
        host = $HOSTNAME
EOF
            if [ "$standby" -eq 1 ]; then
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
        mds standby for rank = $mds
[mds.${name}s]
        mds standby replay = true
        mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m  #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done

    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

            # wait for volume module to load
            while ! ceph_adm fs volume ls ; do sleep 1 ; done
            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                ceph_adm fs volume create ${name}
                ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi

}

# Ganesha daemons require the nfs-ganesha, nfs-ganesha-ceph and
# nfs-ganesha-rados-grace packages (version 2.7.6-2 and above). On Fedora>=30
# these packages can be installed directly with 'dnf'. For CentOS>=8 the
# packages need to be downloaded first from
# https://download.nfs-ganesha.org/2.7/2.7.6/CentOS/ and then installed.
# Similarly, for Ubuntu 16.04 follow the instructions at
# https://launchpad.net/~nfs-ganesha/+archive/ubuntu/nfs-ganesha-2.7
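# e.g. on Fedora:  dnf install nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace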

start_ganesha() {
    GANESHA_PORT=$(($CEPH_PORT + 4000))
    local ganesha=0

    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break

        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"

        echo "Starting ganesha.$name on port: $port"

        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir

        echo "NFS_CORE_PARAM {
    Enable_NLM = false;
    Enable_RQUOTA = false;
    Protocols = 4;
    NFS_Port = $port;
}

CACHEINODE {
    Dir_Chunk = 0;
    NParts = 1;
    Cache_Size = 1;
}

NFSv4 {
    RecoveryBackend = 'rados_cluster';
    Minor_Versions = 1, 2;
}

EXPORT {
    Export_Id = 100;
    Transports = TCP;
    Path = /;
    Pseudo = /ceph/;
    Protocols = 4;
    Access_Type = RW;
    Attr_Expiration_Time = 0;
    Squash = None;
    FSAL {
        Name = CEPH;
    }
}

CEPH {
    Ceph_Conf = $conf_fn;
}

RADOS_KV {
    Ceph_Conf = $conf_fn;
    pool = 'nfs-ganesha';
    namespace = 'ganesha';
    UserId = 'admin';
    nodeid = $name;
}" > "$ganesha_dir/ganesha.conf"


        wconf <<EOF
[ganesha.$name]
        host = $HOSTNAME
        ip = $IP
        port = $port
        ganesha data = $ganesha_dir
        pid file = $ganesha_dir/ganesha.pid
EOF

        if !($CEPH_BIN/rados lspools | grep "nfs-ganesha"); then
            prun ceph_adm osd pool create nfs-ganesha
            prun ceph_adm osd pool application enable nfs-ganesha nfs
        fi

        prun ganesha-rados-grace -p nfs-ganesha -n ganesha add $name
        prun ganesha-rados-grace -p nfs-ganesha -n ganesha

        prun /usr/bin/ganesha.nfsd -L "$ganesha_dir/ganesha.log" -f "$ganesha_dir/ganesha.conf" -p "$ganesha_dir/ganesha.pid" -N NIV_DEBUG

        # Wait a few seconds for the grace period to be removed
        sleep 2
        prun ganesha-rados-grace -p nfs-ganesha -n ganesha

        if $with_mgr_dashboard; then
            $CEPH_BIN/rados -p nfs-ganesha put "conf-$name" "$ganesha_dir/ganesha.conf"
        fi
    done

    if $with_mgr_dashboard; then
        ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace nfs-ganesha
    fi
}

if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
        debug mon = 10
        debug ms = 1'
else
    debug echo "** going verbose **"
    CMONDEBUG='
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1'
fi

if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi

if [ -z "$CEPH_PORT" ]; then
    while [ true ]
    do
        CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
        ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
    done
fi
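# The loop above probes with `ss` and settles on a random free base port in
# 40000-40999; monitor, mgr, and ganesha ports are all derived from it.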

[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"

if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
fi

[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
if [ $inc_osd_num -eq 0 ]; then
    $SUDO rm -rf $CEPH_OUT_DIR/*
fi
[ -d gmon ] && $SUDO rm -rf gmon/*

[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn


# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # pick the first IPv4 address that is not localhost
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing is left, fall back to the localhost address; it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"


[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph

ceph_adm() {
    if [ "$cephx" -eq 1 ]; then
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
    else
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
    fi
}
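
# ceph_adm is the admin CLI wrapper used for all cluster mutations below; it
# points the `ceph` binary at this cluster's conf and (with cephx) keyring,
# e.g. `ceph_adm osd pool create nfs-ganesha`.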

if [ $inc_osd_num -gt 0 ]; then
    start_osd
    exit
fi

if [ "$new" -eq 1 ]; then
    prepare_conf
fi

if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon

    debug echo Populating config ...
    cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[global]
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1

[mon]
mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true

[osd]
osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288

[mds]
mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true

[mgr]
mgr/telemetry/nag = false
mgr/telemetry/enable = false

EOF
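
    # assimilate-conf stores these defaults in the monitors' centralized config
    # database rather than in ceph.conf, so they apply to daemons started later.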

    if [ "$debug" -ne 0 ]; then
        debug echo Setting debug configs ...
        cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[mgr]
debug_ms = 1
debug_mgr = 20
debug_monc = 20
debug_mon = 20

[osd]
debug_ms = 1
debug_osd = 25
debug_objecter = 20
debug_monc = 20
debug_mgrc = 20
debug_journal = 20
debug_filestore = 20
debug_bluestore = 20
debug_bluefs = 20
debug_rocksdb = 20
debug_bdev = 20
debug_reserver = 10
debug_objclass = 20

[mds]
debug_ms = 1
debug_mds = 20
debug_monc = 20
debug_mgrc = 20
mds_debug_scatterstat = true
mds_verify_scatter = true
EOF
    fi
fi

if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi

# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi

# mds
if [ "$smallmds" -eq 1 ]; then
    wconf <<EOF
[mds]
        mds log max segments = 2
        # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
        mds cache memory limit = 100M
EOF
fi

if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
    # key with access to all FS
    ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
fi

# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "cephfs_${name}" max_mds "$CEPH_MAX_MDS"
    fi
done

# mgr

if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec erasure ec-profile
EOF
fi

# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    start_ganesha
fi

do_cache() {
    while [ -n "$*" ]; do
        p="$1"
        shift
        debug echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache

do_hitsets() {
    while [ -n "$*" ]; do
        pool="$1"
        type="$2"
        shift
        shift
        debug echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset

do_rgw_create_users()
{
    # Create S3 user
    local akey='0555b35654ad1656d804'
    local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo "  access key:  $akey"
    echo "  secret key:  $skey"
    echo ""
    echo "Swift User Info:"
    echo "  account   : test"
    echo "  user      : tester"
    echo "  password  : testing"
    echo ""
}

do_rgw()
{
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            debug echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi
    # Start server
    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    fi

    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
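
    # A trailing "s" on CEPH_RGW_PORT marks an SSL port: it is stripped from
    # the numeric port here and re-appended to the frontend's port= option below.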
    RGWSUDO=
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo

    current_port=$CEPH_RGW_PORT
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"

        ceph_adm auth get-or-create $rgw_name \
            mon 'allow rw' \
            osd 'allow rwx' \
            mgr 'allow rw' \
            >> "$keyring_fn"

        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            ${RGWDEBUG} \
            -n ${rgw_name} \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}"

        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break

        current_port=$((current_port+1))
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi

debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."

echo ""
if [ "$new" -eq 1 ]; then
    if $with_mgr_dashboard; then
        echo "dashboard urls: $DASH_URLS"
        echo "  w/ user/pass: admin / admin"
    fi
    echo "restful urls: $RESTFUL_URLS"
    echo "  w/ user/pass: admin / $RESTFUL_SECRET"
    echo ""
fi
echo ""
# add header to the environment file
{
    echo "#"
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
    echo "#"
} > $CEPH_DIR/vstart_environment.sh
{
    echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
    echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"

    if [ "$CEPH_DIR" != "$PWD" ]; then
        echo "export CEPH_CONF=$conf_fn"
        echo "export CEPH_KEYRING=$keyring_fn"
    fi

    if [ -n "$CEPHFS_SHELL" ]; then
        echo "alias cephfs-shell=$CEPHFS_SHELL"
    fi
} | tee -a $CEPH_DIR/vstart_environment.sh

echo "CEPH_DEV=1"

# always keep this section at the very bottom of this file
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
    echo ""
    echo ""
    echo "WARNING:"
    echo "    Please remove stray $STRAY_CONF_PATH if not needed."
    echo "    Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "    and may lead to undesired results."
    echo ""
    echo "NOTE:"
    echo "    Remember to restart cluster after removing $STRAY_CONF_PATH"
fi