]> git.proxmox.com Git - ceph.git/blame - ceph/src/vstart.sh
import quincy beta 17.1.0
[ceph.git] / ceph / src / vstart.sh
CommitLineData
11fdf7f2 1#!/usr/bin/env bash
9f95a23c
TL
2# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
3# vim: softtabstop=4 shiftwidth=4 expandtab
7c673cae
FG
4
5# abort on failure
6set -e
7
9f95a23c
TL
8quoted_print() {
9 for s in "$@"; do
10 if [[ "$s" =~ \ ]]; then
11 printf -- "'%s' " "$s"
12 else
13 printf -- "$s "
14 fi
15 done
16 printf '\n'
17}
18
debug() {
    # Run a command with its stdout diverted to stderr, so diagnostic
    # output never pollutes a command substitution capturing stdout.
    "$@" 1>&2
}
22
prunb() {
    # Echo the command (with a trailing '&') to stderr, then launch it in
    # the background with the build's bin directory first in PATH.
    debug quoted_print "$@" '&'
    PATH="$CEPH_BIN:$PATH" "$@" &
}
27
prun() {
    # Echo the command to stderr, then run it in the foreground with the
    # build's bin directory first in PATH.
    debug quoted_print "$@"
    PATH="$CEPH_BIN:$PATH" "$@"
}
32
33
# When VSTART_DEST is set, keep all of the cluster's mutable state
# (conf file, dev/, out/, admin sockets) under that directory instead of
# the current (build) directory; binaries are still taken from $PWD.
if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`dirname $0`            # directory containing this script
    SRC_PATH=`(cd $SRC_PATH; pwd)`   # ...resolved to an absolute path

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${PWD}/bin
    CEPH_LIB=${PWD}/lib

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/out
fi
47
11fdf7f2
TL
get_cmake_variable() {
    # Print the value of a CMake cache variable from ./CMakeCache.txt.
    # Cache lines look like "<name>:<type>=<value>".  Anchor the match so
    # a variable whose name is a suffix of another cannot be picked up,
    # and use "-f 2-" so values that themselves contain '=' (compiler
    # flag lists, for instance) are not truncated at the second '='.
    local variable=$1
    grep "^${variable}:" CMakeCache.txt | cut -d "=" -f 2-
}
52
7c673cae
FG
# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    CEPH_BUILD_DIR=`pwd`
    # default the mgr python module path to the source tree unless the
    # caller already pointed it somewhere else
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi
60
9f95a23c 61# use CEPH_BUILD_ROOT to vstart from a 'make install'
7c673cae 62if [ -n "$CEPH_BUILD_ROOT" ]; then
9f95a23c
TL
63 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
64 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
f67539c2 65 [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_ROOT/external/lib
9f95a23c
TL
66 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
67 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
68 # make install should install python extensions into PYTHONPATH
7c673cae 69elif [ -n "$CEPH_ROOT" ]; then
9f95a23c
TL
70 [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/cephfs-shell
71 [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
72 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
73 [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
74 [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
75 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
f67539c2 76 [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_DIR/external/lib
9f95a23c
TL
77 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
78 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
79 [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
7c673cae
FG
80fi
81
82if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
83 PATH=$(pwd):$PATH
84fi
85
86[ -z "$PYBIND" ] && PYBIND=./pybind
87
9f95a23c
TL
88[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
89CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
90export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH
91
f67539c2
TL
92export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
93export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
31f18b77 94# Suppress logging for regular use that indicated that we are using a
9f95a23c 95# development version. vstart.sh is only used during testing and
31f18b77
FG
96# development
97export CEPH_DEV=1
7c673cae
FG
98
99[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
100[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
101[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
102[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
103[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
104[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
f6b5b4d7 105[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"
7c673cae
FG
106
107# if none of the CEPH_NUM_* number is specified, kill the existing
108# cluster.
109if [ -z "$CEPH_NUM_MON" -a \
110 -z "$CEPH_NUM_OSD" -a \
111 -z "$CEPH_NUM_MDS" -a \
9f95a23c
TL
112 -z "$CEPH_NUM_MGR" -a \
113 -z "$GANESHA_DAEMON_NUM" ]; then
7c673cae
FG
114 kill_all=1
115else
116 kill_all=0
117fi
118
119[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
120[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
121[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
122[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
123[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
124[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
125[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
9f95a23c 126[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0
7c673cae
FG
127
128[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
129[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
130[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
131[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
132[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
133
# Replicate across every OSD on small clusters, but cap the default pool
# size at 3 replicas once there are more than 3 OSDs.
if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
139
140extra_conf=""
141new=0
142standby=0
143debug=0
20effc67 144trace=0
7c673cae
FG
145ip=""
146nodaemon=0
9f95a23c 147redirect=0
7c673cae
FG
148smallmds=0
149short=0
150ec=0
9f95a23c
TL
151cephadm=0
152parallel=true
7c673cae 153hitset=""
9f95a23c 154overwrite_conf=0
7c673cae 155cephx=1 #turn cephx on by default
11fdf7f2 156gssapi_authx=0
7c673cae 157cache=""
11fdf7f2
TL
158if [ `uname` = FreeBSD ]; then
159 objectstore="filestore"
160else
161 objectstore="bluestore"
162fi
9f95a23c 163ceph_osd=ceph-osd
11fdf7f2 164rgw_frontend="beast"
224ce89b 165rgw_compression=""
7c673cae 166lockdep=${LOCKDEP:-1}
11fdf7f2 167spdk_enabled=0 #disable SPDK by default
f67539c2
TL
168zoned_enabled=0
169io_uring_enabled=0
170with_jaeger=0
11fdf7f2
TL
171
172with_mgr_dashboard=true
173if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
174 [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
9f95a23c
TL
175 debug echo "ceph-mgr dashboard not built - disabling."
176 with_mgr_dashboard=false
11fdf7f2 177fi
20effc67 178with_mgr_restful=false
7c673cae 179
31f18b77 180filestore_path=
11fdf7f2 181kstore_path=
20effc67
TL
182declare -a block_devs
183declare -a secondary_block_devs
31f18b77 184
7c673cae
FG
185VSTART_SEC="client.vstart.sh"
186
187MON_ADDR=""
31f18b77
FG
188DASH_URLS=""
189RESTFUL_URLS=""
7c673cae
FG
190
191conf_fn="$CEPH_CONF_PATH/ceph.conf"
192keyring_fn="$CEPH_CONF_PATH/keyring"
7c673cae 193monmap_fn="/tmp/ceph_monmap.$$"
9f95a23c 194inc_osd_num=0
7c673cae 195
11fdf7f2
TL
196msgr="21"
197
20effc67
TL
198read -r -d '' usage <<EOF || true
199usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d
200options:
201 -d, --debug
202 -t, --trace
203 -s, --standby_mds: Generate standby-replay MDS for each active
204 -l, --localhost: use localhost instead of hostname
205 -i <ip>: bind to specific ip
206 -n, --new
207 --valgrind[_{osd,mds,mon,rgw}] 'toolname args...'
208 --nodaemon: use ceph-run as wrapper for mon/osd/mds
209 --redirect-output: only useful with nodaemon, directs output to log file
210 --smallmds: limit mds cache memory limit
211 -m ip:port specify monitor address
212 -k keep old configuration files (default)
213 -x enable cephx (on by default)
214 -X disable cephx
215 -g --gssapi enable Kerberos/GSSApi authentication
216 -G disable Kerberos/GSSApi authentication
217 --hitset <pool> <hit_set_type>: enable hitset tracking
218 -e : create an erasure pool\
219 -o config add extra config parameters to all sections
220 --rgw_port specify ceph rgw http listen port
221 --rgw_frontend specify the rgw frontend configuration
222 --rgw_compression specify the rgw compression plugin
223 --seastore use seastore as crimson osd backend
224 -b, --bluestore use bluestore as the osd objectstore backend (default)
225 -f, --filestore use filestore as the osd objectstore backend
226 -K, --kstore use kstore as the osd objectstore backend
227 --cyanstore use cyanstore as the osd objectstore backend
228 --memstore use memstore as the osd objectstore backend
229 --cache <pool>: enable cache tiering on pool
230 --short: short object names only; necessary for ext4 dev
231 --nolockdep disable lockdep
232 --multimds <count> allow multimds with maximum active count
233 --without-dashboard: do not run using mgr dashboard
234 --bluestore-spdk: enable SPDK and with a comma-delimited list of PCI-IDs of NVME device (e.g, 0000:81:00.0)
235 --msgr1: use msgr1 only
236 --msgr2: use msgr2 only
237 --msgr21: use msgr2 and msgr1
238 --crimson: use crimson-osd instead of ceph-osd
239 --crimson-foreground: use crimson-osd, but run it in the foreground
240 --osd-args: specify any extra osd specific options
241 --bluestore-devs: comma-separated list of blockdevs to use for bluestore
242 --bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)
243 --bluestore-io-uring: enable io_uring backend
244 --inc-osd: append some more osds into existing vcluster
245 --cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]
246 --no-parallel: dont start all OSDs in parallel
247 --jaeger: use jaegertracing for tracing
248 --seastore-devs: comma-separated list of blockdevs to use for seastore
249 --seastore-secondary-des: comma-separated list of secondary blockdevs to use for seastore
250\n
251EOF
7c673cae
FG
252
usage_exit() {
    # Show the help text and quit.  $usage is passed as the printf format
    # on purpose: the embedded \n escapes in it must be interpreted.
    printf -- "$usage"
    exit
}
257
20effc67
TL
parse_block_devs() {
    # Split a comma-separated device list (e.g. from --bluestore-devs)
    # into the global block_devs array, verifying that every entry is a
    # writable block device; aborts the script otherwise.
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a block_devs <<< "$devs"
    for dev in "${block_devs[@]}"; do
        # quote $dev so paths with spaces don't break the test
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            # diagnostics belong on stderr, not stdout
            echo "All $opt_name must refer to writable block devices" >&2
            exit 1
        fi
    done
}
272
parse_secondary_devs() {
    # Split a comma-separated device list (e.g. from
    # --seastore-secondary-devs) into the global secondary_block_devs
    # array, verifying that every entry is a writable block device;
    # aborts the script otherwise.
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a secondary_block_devs <<< "$devs"
    for dev in "${secondary_block_devs[@]}"; do
        # quote $dev so paths with spaces don't break the test
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            # diagnostics belong on stderr, not stdout
            echo "All $opt_name must refer to writable block devices" >&2
            exit 1
        fi
    done
}
287
7c673cae
FG
288while [ $# -ge 1 ]; do
289case $1 in
f67539c2 290 -d | --debug)
9f95a23c
TL
291 debug=1
292 ;;
20effc67
TL
293 -t | --trace)
294 trace=1
295 ;;
7c673cae 296 -s | --standby_mds)
9f95a23c
TL
297 standby=1
298 ;;
f67539c2 299 -l | --localhost)
9f95a23c
TL
300 ip="127.0.0.1"
301 ;;
f67539c2 302 -i)
9f95a23c
TL
303 [ -z "$2" ] && usage_exit
304 ip="$2"
305 shift
306 ;;
f67539c2 307 -e)
9f95a23c
TL
308 ec=1
309 ;;
f67539c2 310 --new | -n)
9f95a23c
TL
311 new=1
312 ;;
f67539c2 313 --inc-osd)
9f95a23c
TL
314 new=0
315 kill_all=0
316 inc_osd_num=$2
317 if [ "$inc_osd_num" == "" ]; then
318 inc_osd_num=1
319 else
320 shift
321 fi
322 ;;
f67539c2 323 --short)
9f95a23c
TL
324 short=1
325 ;;
f67539c2 326 --crimson)
9f95a23c 327 ceph_osd=crimson-osd
20effc67
TL
328 nodaemon=1
329 msgr=2
330 ;;
331 --crimson-foreground)
332 ceph_osd=crimson-osd
333 nodaemon=0
334 msgr=2
9f95a23c 335 ;;
f67539c2 336 --osd-args)
9f95a23c
TL
337 extra_osd_args="$2"
338 shift
339 ;;
f67539c2 340 --msgr1)
9f95a23c
TL
341 msgr="1"
342 ;;
f67539c2 343 --msgr2)
9f95a23c
TL
344 msgr="2"
345 ;;
f67539c2 346 --msgr21)
9f95a23c
TL
347 msgr="21"
348 ;;
f67539c2 349 --cephadm)
9f95a23c
TL
350 cephadm=1
351 ;;
f67539c2 352 --no-parallel)
9f95a23c
TL
353 parallel=false
354 ;;
f67539c2 355 --valgrind)
9f95a23c
TL
356 [ -z "$2" ] && usage_exit
357 valgrind=$2
358 shift
359 ;;
f67539c2 360 --valgrind_args)
9f95a23c
TL
361 valgrind_args="$2"
362 shift
363 ;;
f67539c2 364 --valgrind_mds)
9f95a23c
TL
365 [ -z "$2" ] && usage_exit
366 valgrind_mds=$2
367 shift
368 ;;
f67539c2 369 --valgrind_osd)
9f95a23c
TL
370 [ -z "$2" ] && usage_exit
371 valgrind_osd=$2
372 shift
373 ;;
f67539c2 374 --valgrind_mon)
9f95a23c
TL
375 [ -z "$2" ] && usage_exit
376 valgrind_mon=$2
377 shift
378 ;;
f67539c2 379 --valgrind_mgr)
9f95a23c
TL
380 [ -z "$2" ] && usage_exit
381 valgrind_mgr=$2
382 shift
383 ;;
f67539c2 384 --valgrind_rgw)
9f95a23c
TL
385 [ -z "$2" ] && usage_exit
386 valgrind_rgw=$2
387 shift
388 ;;
f67539c2 389 --nodaemon)
9f95a23c
TL
390 nodaemon=1
391 ;;
392 --redirect-output)
393 redirect=1
394 ;;
f67539c2 395 --smallmds)
9f95a23c
TL
396 smallmds=1
397 ;;
f67539c2 398 --rgw_port)
9f95a23c
TL
399 CEPH_RGW_PORT=$2
400 shift
401 ;;
f67539c2 402 --rgw_frontend)
9f95a23c
TL
403 rgw_frontend=$2
404 shift
405 ;;
f67539c2 406 --rgw_compression)
9f95a23c
TL
407 rgw_compression=$2
408 shift
409 ;;
f67539c2 410 --kstore_path)
9f95a23c
TL
411 kstore_path=$2
412 shift
413 ;;
f67539c2 414 --filestore_path)
9f95a23c
TL
415 filestore_path=$2
416 shift
417 ;;
f67539c2 418 -m)
9f95a23c
TL
419 [ -z "$2" ] && usage_exit
420 MON_ADDR=$2
421 shift
422 ;;
f67539c2 423 -x)
9f95a23c
TL
424 cephx=1 # this is on be default, flag exists for historical consistency
425 ;;
f67539c2 426 -X)
9f95a23c
TL
427 cephx=0
428 ;;
429
11fdf7f2 430 -g | --gssapi)
9f95a23c
TL
431 gssapi_authx=1
432 ;;
11fdf7f2 433 -G)
9f95a23c
TL
434 gssapi_authx=0
435 ;;
11fdf7f2 436
f67539c2 437 -k)
9f95a23c
TL
438 if [ ! -r $conf_fn ]; then
439 echo "cannot use old configuration: $conf_fn not readable." >&2
440 exit
441 fi
442 new=0
443 ;;
f67539c2 444 --memstore)
9f95a23c
TL
445 objectstore="memstore"
446 ;;
20effc67
TL
447 --cyanstore)
448 objectstore="cyanstore"
449 ;;
450 --seastore)
451 objectstore="seastore"
452 ;;
f67539c2 453 -b | --bluestore)
9f95a23c
TL
454 objectstore="bluestore"
455 ;;
f67539c2 456 -f | --filestore)
9f95a23c
TL
457 objectstore="filestore"
458 ;;
f67539c2 459 -K | --kstore)
9f95a23c
TL
460 objectstore="kstore"
461 ;;
f67539c2 462 --hitset)
9f95a23c
TL
463 hitset="$hitset $2 $3"
464 shift
465 shift
466 ;;
f67539c2
TL
467 -o)
468 extra_conf+=$'\n'"$2"
9f95a23c
TL
469 shift
470 ;;
f67539c2 471 --cache)
9f95a23c
TL
472 if [ -z "$cache" ]; then
473 cache="$2"
474 else
475 cache="$cache $2"
476 fi
477 shift
478 ;;
f67539c2 479 --nolockdep)
9f95a23c
TL
480 lockdep=0
481 ;;
7c673cae
FG
482 --multimds)
483 CEPH_MAX_MDS="$2"
484 shift
485 ;;
11fdf7f2
TL
486 --without-dashboard)
487 with_mgr_dashboard=false
488 ;;
20effc67
TL
489 --with-restful)
490 with_mgr_restful=true
491 ;;
492 --seastore-devs)
493 parse_block_devs --seastore-devs "$2"
494 shift
495 ;;
496 --seastore-secondary-devs)
497 parse_secondary_devs --seastore-devs "$2"
498 shift
499 ;;
f67539c2 500 --bluestore-spdk)
11fdf7f2 501 [ -z "$2" ] && usage_exit
f67539c2 502 IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
11fdf7f2
TL
503 spdk_enabled=1
504 shift
505 ;;
f67539c2 506 --bluestore-devs)
20effc67 507 parse_block_devs --bluestore-devs "$2"
9f95a23c
TL
508 shift
509 ;;
f67539c2
TL
510 --bluestore-zoned)
511 zoned_enabled=1
512 ;;
513 --bluestore-io-uring)
514 io_uring_enabled=1
515 shift
516 ;;
517 --jaeger)
518 with_jaeger=1
519 echo "with_jaeger $with_jaeger"
520 ;;
521 *)
9f95a23c 522 usage_exit
7c673cae
FG
523esac
524shift
525done
526
527if [ $kill_all -eq 1 ]; then
528 $SUDO $INIT_CEPH stop
529fi
530
9f95a23c
TL
531if [ "$new" -eq 0 ]; then
532 if [ -z "$CEPH_ASOK_DIR" ]; then
533 CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
534 fi
c07f9fc5 535 mkdir -p $CEPH_ASOK_DIR
9f95a23c 536 MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
7c673cae 537 CEPH_NUM_MON="$MON"
9f95a23c 538 OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
7c673cae 539 CEPH_NUM_OSD="$OSD"
9f95a23c 540 MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
7c673cae 541 CEPH_NUM_MDS="$MDS"
9f95a23c 542 MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
7c673cae 543 CEPH_NUM_MGR="$MGR"
9f95a23c 544 RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
7c673cae 545 CEPH_NUM_RGW="$RGW"
f6b5b4d7
TL
546 NFS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
547 GANESHA_DAEMON_NUM="$NFS"
7c673cae 548else
9f95a23c
TL
549 # only delete if -n
550 if [ -e "$conf_fn" ]; then
551 asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
552 rm -- "$conf_fn"
553 if [ $asok_dir != /var/run/ceph ]; then
c07f9fc5 554 [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
c07f9fc5 555 fi
9f95a23c
TL
556 fi
557 if [ -z "$CEPH_ASOK_DIR" ]; then
558 CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
7c673cae
FG
559 fi
560fi
561
562ARGS="-c $conf_fn"
563
7c673cae
FG
run() {
    # Launch one daemon: run <type> <id> <command...>.
    # Honors a per-daemon valgrind override ($valgrind_mon, $valgrind_osd,
    # ...) falling back to the global $valgrind, plus the --nodaemon and
    # --redirect-output flags.
    type=$1
    shift
    num=$1
    shift
    # indirect lookup replaces the original eval: valg=$valgrind_<type>
    local vg_var="valgrind_$type"
    valg=${!vg_var}
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    elif [ "$nodaemon" -eq 0 ]; then
        prun "$@"
    elif [ "$redirect" -eq 0 ]; then
        prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
    else
        ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
    fi
}
585
wconf() {
    # Append stdin to the cluster conf file, but only when we are writing
    # a fresh configuration (-n) or explicitly overwriting an old one.
    if [ "$new" -eq 1 ] || [ "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}
591
9f95a23c
TL
592
do_rgw_conf() {

    # Nothing to emit when no rgw daemons were requested.
    [ $CEPH_NUM_RGW -eq 0 ] && return 0

    # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
    # individual rgw's ids will be their ports.
    local i
    current_port=$CEPH_RGW_PORT
    for ((i = 0; i < CEPH_NUM_RGW; i++)); do
        wconf << EOF
[client.rgw.${current_port}]
    rgw frontends = $rgw_frontend port=${current_port}
    admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
EOF
        current_port=$((current_port + 1))
    done

}
612
f67539c2
TL
format_conf() {
    # Join a multi-line option string into one blob where every line
    # after the first is indented, so it lines up when substituted inside
    # a [section] heredoc of the generated ceph.conf.
    local opts=$1
    local pad="        "
    local line
    local joined
    while read -r line; do
        if [ -z "$joined" ]; then
            joined="${line}"
        else
            joined+=$'\n'${pad}${line}
        fi
    done <<< "$opts"
    echo "$joined"
}
627
7c673cae
FG
prepare_conf() {
    # Write the cluster's ceph.conf via wconf (which is a no-op unless -n
    # or overwrite_conf is set).  Emits [global], [client], per-rgw,
    # [mds], [mgr], [osd] and [mon] sections from the flags parsed above.
    # $name/$id/$pid are escaped so ceph expands them at daemon runtime.
    local DAEMONOPTS="
        log file = $CEPH_OUT_DIR/\$name.log
        admin socket = $CEPH_ASOK_DIR/\$name.asok
        chdir = \"\"
        pid file = $CEPH_OUT_DIR/\$name.pid
        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    # mgr modules enabled at bootstrap; dashboard/restful only when built
    # and requested
    local mgr_modules="iostat nfs"
    if $with_mgr_dashboard; then
        mgr_modules+=" dashboard"
    fi
    if $with_mgr_restful; then
        mgr_modules+=" restful"
    fi

    # translate --msgr1/--msgr2/--msgr21 into ms bind settings;
    # format_conf re-indents the continuation line later
    local msgr_conf=''
    if [ $msgr -eq 21 ]; then
        msgr_conf="ms bind msgr2 = true
            ms bind msgr1 = true"
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="ms bind msgr2 = true
            ms bind msgr1 = false"
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="ms bind msgr2 = false
            ms bind msgr1 = true"
    fi

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
    num mon = $CEPH_NUM_MON
    num osd = $CEPH_NUM_OSD
    num mds = $CEPH_NUM_MDS
    num mgr = $CEPH_NUM_MGR
    num rgw = $CEPH_NUM_RGW
    num ganesha = $GANESHA_DAEMON_NUM

[global]
    fsid = $(uuidgen)
    osd failsafe full ratio = .99
    mon osd full ratio = .99
    mon osd nearfull ratio = .99
    mon osd backfillfull ratio = .99
    mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
    erasure code dir = $EC_PATH
    plugin dir = $CEPH_LIB
    filestore fd cache size = 32
    run dir = $CEPH_OUT_DIR
    crash dir = $CEPH_OUT_DIR
    enable experimental unrecoverable data corrupting features = *
    osd_crush_chooseleaf_type = 0
    debug asok assert abort = true
    $(format_conf "${msgr_conf}")
    $(format_conf "${extra_conf}")
EOF
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
    lockdep = true
EOF
    fi
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
    auth cluster required = cephx
    auth service required = cephx
    auth client required = cephx
EOF
    elif [ "$gssapi_authx" -eq 1 ] ; then
        wconf <<EOF
    auth cluster required = gss
    auth service required = gss
    auth client required = gss
    gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
    else
        wconf <<EOF
    auth cluster required = none
    auth service required = none
    auth client required = none
    ms mon client mode = crc
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT="    osd max object name len = 460
    osd max object namespace len = 64"
    fi
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ]; then
            BLUESTORE_OPTS="    bluestore_block_db_path = \"\"
    bluestore_block_db_size = 0
    bluestore_block_db_create = false
    bluestore_block_wal_path = \"\"
    bluestore_block_wal_size = 0
    bluestore_block_wal_create = false
    bluestore_spdk_mem = 2048"
        else
            BLUESTORE_OPTS="    bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
    bluestore block db size = 1073741824
    bluestore block db create = true
    bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
    bluestore block wal size = 1048576000
    bluestore block wal create = true"
        fi
        if [ "$zoned_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
    bluestore min alloc size = 65536
    bluestore prefer deferred size = 0
    bluestore prefer deferred size hdd = 0
    bluestore prefer deferred size ssd = 0
    bluestore allocator = zoned"
        fi
        if [ "$io_uring_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
    bdev ioring = true"
        fi
    fi
    wconf <<EOF
[client]
    keyring = $keyring_fn
    log file = $CEPH_OUT_DIR/\$name.\$pid.log
    admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok

    ; needed for s3tests
    rgw crypt s3 kms backend = testing
    rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
    rgw crypt require ssl = false
    ; uncomment the following to set LC days as the value in seconds;
    ; needed for passing lc time based s3-tests (can be verbose)
    ; rgw lc debug interval = 10
    $(format_conf "${extra_conf}")
EOF
    do_rgw_conf
    wconf << EOF
[mds]
$DAEMONOPTS
    mds data = $CEPH_DEV_DIR/mds.\$id
    mds root ino uid = `id -u`
    mds root ino gid = `id -g`
    $(format_conf "${extra_conf}")
[mgr]
    mgr disabled modules = rook
    mgr data = $CEPH_DEV_DIR/mgr.\$id
    mgr module path = $MGR_PYTHON_PATH
    cephadm path = $CEPH_ROOT/src/cephadm/cephadm
$DAEMONOPTS
    $(format_conf "${extra_conf}")
[osd]
$DAEMONOPTS
    osd_check_max_object_name_len_on_startup = false
    osd data = $CEPH_DEV_DIR/osd\$id
    osd journal = $CEPH_DEV_DIR/osd\$id/journal
    osd journal size = 100
    osd class tmp = out
    osd class dir = $OBJCLASS_PATH
    osd class load list = *
    osd class default list = *
    osd fast shutdown = false

    filestore wbthrottle xfs ios start flusher = 10
    filestore wbthrottle xfs ios hard limit = 20
    filestore wbthrottle xfs inodes hard limit = 30
    filestore wbthrottle btrfs ios start flusher = 10
    filestore wbthrottle btrfs ios hard limit = 20
    filestore wbthrottle btrfs inodes hard limit = 30
    bluestore fsck on mount = true
    bluestore block create = true
$BLUESTORE_OPTS

    ; kstore
    kstore fsck on mount = true
    osd objectstore = $objectstore
$COSDSHORT
    $(format_conf "${extra_conf}")
[mon]
    mon_data_avail_crit = 1
    mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
    $(format_conf "${extra_conf}")
    mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
    osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
    auth allow insecure global id reclaim = false
EOF
}
815
816write_logrotate_conf() {
817 out_dir=$(pwd)"/out/*.log"
818
819 cat << EOF
820$out_dir
821{
822 rotate 5
823 size 1G
824 copytruncate
825 compress
826 notifempty
827 missingok
828 sharedscripts
829 postrotate
830 # NOTE: assuring that the absence of one of the following processes
831 # won't abort the logrotate command.
832 killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
833 endscript
834}
7c673cae
FG
835EOF
836}
837
f67539c2
TL
init_logrotate() {
    # Create ./logrotate.conf on the first run; when regenerating the
    # config, also discard any stale ./logrotate.state from a prior run.
    logrotate_conf_path=$(pwd)"/logrotate.conf"
    logrotate_state_path=$(pwd)"/logrotate.state"

    if [ ! -e "$logrotate_conf_path" ]; then
        rm -f "$logrotate_state_path"
        write_logrotate_conf > "$logrotate_conf_path"
    fi
}
849
7c673cae
FG
start_mon() {
    # Pick single-letter mon names (a, b, c, ...) up to $CEPH_NUM_MON.
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ]; then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    # For a fresh cluster (-n): create keyrings, write per-mon conf
    # sections, build a monmap, and mkfs each mon's data dir.
    if [ "$new" -eq 1 ]; then
        if [ `echo $IP | grep '^127\\.'` ]; then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo " connect. either adjust /etc/hosts, or edit this script to use your"
            echo " machine's real IP."
            echo
        fi

        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local params=()
        local count=0
        local mon_host=""
        for f in $MONS
        do
            # each mon gets its own port(s); with msgr21 two consecutive
            # ports are consumed (v2 then v1), hence count += 2 below
            if [ $msgr -eq 1 ]; then
                A="v1:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 2 ]; then
                A="v2:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 21 ]; then
                A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
            fi
            params+=("--addv" "$f" "$A")
            mon_host="$mon_host $A"
            wconf <<EOF
[mon.$f]
    host = $HOSTNAME
    mon data = $CEPH_DEV_DIR/mon.$f
EOF
            count=$(($count + 2))
        done
        wconf <<EOF
[global]
    mon host = $mon_host
EOF
        prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"

        # wipe and re-create each mon's data directory from the monmap
        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done
}
927
start_osd() {
    # Determine the osd id range: either append $inc_osd_num new osds
    # after the current max (--inc-osd), or create 0..CEPH_NUM_OSD-1.
    if [ $inc_osd_num -gt 0 ]; then
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        start=$old_maxosd
        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf
    else
        start=0
        end=$(($CEPH_NUM_OSD-1))
    fi
    local osds_wait
    for osd in `seq $start $end`
    do
        local extra_seastar_args
        if [ "$ceph_osd" == "crimson-osd" ]; then
            # designate a single CPU node $osd for osd.$osd
            extra_seastar_args="--smp 1 --cpuset $osd"
            if [ "$debug" -ne 0 ]; then
                extra_seastar_args+=" --debug"
            fi
            if [ "$trace" -ne 0 ]; then
                extra_seastar_args+=" --trace"
            fi
        fi
        # Fresh cluster or appended osd: provision data dir, key and conf.
        if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
            wconf <<EOF
[osd.$osd]
    host = $HOSTNAME
EOF
            if [ "$spdk_enabled" -eq 1 ]; then
                wconf <<EOF
    bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
EOF
            fi

            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            # data dir is either a symlink to a user-supplied store path,
            # or a fresh directory (optionally backed by raw block devs)
            if [ -n "$filestore_path" ]; then
                ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
            elif [ -n "$kstore_path" ]; then
                ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
                if [ -n "${block_devs[$osd]}" ]; then
                    dd if=/dev/zero of=${block_devs[$osd]} bs=1M count=1
                    ln -s ${block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block
                fi
                if [ -n "${secondary_block_devs[$osd]}" ]; then
                    dd if=/dev/zero of=${secondary_block_devs[$osd]} bs=1M count=1
                    ln -s ${secondary_block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.segmented.1
                fi
            fi
            if [ "$objectstore" == "bluestore" ]; then
                wconf <<EOF
    bluestore fsck on mount = false
EOF
            fi

            # register the osd with the cluster and mkfs its store
            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            prun $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            cat > $key_fn<<EOF
[osd.$osd]
    key = $OSD_SECRET
EOF
        fi
        echo start osd.$osd
        local osd_pid
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &
        osd_pid=$!
        if $parallel; then
            # NOTE(review): this overwrites rather than appends, so only
            # the last osd's pid is waited on below — confirm upstream
            # intent before relying on the barrier.
            osds_wait=$osd_pid
        else
            wait $osd_pid
        fi
    done
    if $parallel; then
        for p in $osds_wait; do
            wait $p
        done
        debug echo OSDs started
    fi
    if [ $inc_osd_num -gt 0 ]; then
        # update num osd
        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
    fi
}
1026
20effc67
TL
create_mgr_restful_secret() {
    # Block until the restful module shows up in 'ceph -h' output.
    until ceph_adm -h | grep -c -q ^restful ; do
        debug echo 'waiting for mgr restful module to start'
        sleep 1
    done
    # Generate an admin API key and stash it in $RESTFUL_SECRET.
    local secret_file
    if ceph_adm restful create-self-signed-cert > /dev/null; then
        secret_file=$(mktemp)
        ceph_adm restful create-key admin -o "$secret_file"
        RESTFUL_SECRET=$(cat "$secret_file")
        rm -f "$secret_file"
    else
        debug echo MGR Restful is not working, perhaps the package is not installed?
    fi
}
1042
7c673cae
FG
start_mgr() {
    # Start $CEPH_NUM_MGR ceph-mgr daemons.  On a new cluster ($new == 1)
    # this also creates keyrings, assigns dashboard/prometheus/restful ports
    # and, when requested, bootstraps the cephadm orchestrator.
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

            wconf <<EOF
[mgr.$name]
        host = $HOSTNAME
EOF

            if $with_mgr_dashboard ; then
                # default to https; plain http only when DASHBOARD_SSL=0
                local port_option="ssl_server_port"
                local http_proto="https"
                if [ "$ssl" == "0" ]; then
                    port_option="server_port"
                    http_proto="http"
                    ceph_adm config set mgr mgr/dashboard/ssl false --force
                fi
                ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
                # accumulate one URL per mgr for the end-of-run summary
                if [ $mgr -eq 1 ]; then
                    DASH_URLS="$http_proto://$IP:$MGR_PORT"
                else
                    DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
                fi
            fi
            # each mgr gets its own 1000-wide port band
            MGR_PORT=$(($MGR_PORT + 1000))
            ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
            PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

            ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
        fi

        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            # wait until the dashboard module has registered its CLI commands
            while ! ceph_adm -h | grep -c -q ^dashboard ; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            # password is passed via file to keep it off the command line
            DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
            printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
            ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
                administrator --force-password
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert; then
                    debug echo dashboard module not working correctly!
                fi
            fi
        fi
        if $with_mgr_restful; then
            create_mgr_restful_secret
        fi
    fi

    if [ "$cephadm" -eq 1 ]; then
        debug echo Enabling cephadm orchestrator
        if [ "$new" -eq 1 ]; then
            # pin the container image to the current latest-master-devel digest
            digest=$(curl -s \
                https://hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
                | jq -r '.images[0].digest')
            ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
        fi
        # NOTE(review): assumes ~/.ssh/id_rsa(.pub) exist — cephadm needs them
        ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
        ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
        ceph_adm mgr module enable cephadm
        ceph_adm orch set backend cephadm
        ceph_adm orch host add "$(hostname)"
        ceph_adm orch apply crash '*'
        ceph_adm config set mgr mgr/cephadm/allow_ptrace true
    fi
}
7c673cae 1135
start_mds() {
    # Start $CEPH_NUM_MDS metadata servers (plus standby-replay daemons when
    # $standby == 1) and, on a new cluster, create $CEPH_NUM_FS filesystems.
    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
        host = $HOSTNAME
EOF
            if [ "$standby" -eq 1 ]; then
                # standby-replay daemon "mds.<name>s" shadows "mds.<name>"
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
        mds standby for rank = $mds
[mds.${name}s]
        mds standby replay = true
        mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m  #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done

    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

            # wait for volume module to load
            while ! ceph_adm fs volume ls ; do sleep 1 ; done
            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                # one volume per fs; a per-fs client key is appended to the
                # shared vstart keyring
                ceph_adm fs volume create ${name}
                ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi

}
1200
# Ganesha daemons require the nfs-ganesha, nfs-ganesha-ceph,
# nfs-ganesha-rados-grace and nfs-ganesha-rados-urls (version 3.3 and
# above) packages to be installed. On Fedora>=31 these packages can be
# installed directly with 'dnf'. For CentOS>=8 the packages are available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly, for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha
9f95a23c
TL
1208
start_ganesha() {
    # Start $GANESHA_DAEMON_NUM NFS-Ganesha daemons backed by CephFS "a",
    # using the rados_cluster recovery backend in pool ".nfs".
    cluster_id="vstart"
    GANESHA_PORT=$(($CEPH_PORT + 4000))
    local ganesha=0
    test_user="$cluster_id"
    pool_name=".nfs"
    namespace=$cluster_id
    url="rados://$pool_name/$namespace/conf-nfs.$test_user"

    # client key used by ganesha for both RADOS recovery state and CephFS data
    prun ceph_adm auth get-or-create client.$test_user \
        mon "allow r" \
        osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
        mds "allow rw path=/" \
        >> "$keyring_fn"

    # use the dummy test orchestrator so 'nfs cluster/export' commands work
    ceph_adm mgr module enable test_orchestrator
    ceph_adm orch set backend test_orchestrator
    ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
    prun ceph_adm nfs cluster create $cluster_id
    prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"

    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break

        # consecutive ports starting at $GANESHA_PORT
        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir

        # minimal per-daemon config; the shared export config is pulled in
        # via the %url directive from RADOS
        echo "NFS_CORE_PARAM {
            Enable_NLM = false;
            Enable_RQUOTA = false;
            Protocols = 4;
            NFS_Port = $port;
        }

        MDCACHE {
           Dir_Chunk = 0;
        }

        NFSv4 {
           RecoveryBackend = rados_cluster;
           Minor_Versions = 1, 2;
        }

        RADOS_KV {
           pool = '$pool_name';
           namespace = $namespace;
           UserId = $test_user;
           nodeid = $name;
        }

        RADOS_URLS {
           Userid = $test_user;
           watch_url = '$url';
        }

        %url $url" > "$ganesha_dir/ganesha-$name.conf"
        wconf <<EOF
[ganesha.$name]
        host = $HOSTNAME
        ip = $IP
        port = $port
        ganesha data = $ganesha_dir
        pid file = $CEPH_OUT_DIR/ganesha-$name.pid
EOF

        # register this node in the grace db, then dump it for the log
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG

        # Wait few seconds for grace period to be removed
        sleep 2

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        echo "$test_user ganesha daemon $name started on port: $port"
    done
}
1291
7c673cae
FG
# Monitor debug levels: modest by default, verbose with -d/--debug.
if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
        debug mon = 10
        debug ms = 1'
else
    debug echo "** going verbose **"
    CMONDEBUG='
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1'
fi

# Point every daemon type at an externally supplied monitor, if given.
if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi

# Pick a random base port in [40000, 41000) that nothing is listening on.
# NOTE(review): '[ true ]' is always true (it tests a non-empty string);
# the loop exits only via the 'break'.  'egrep' is deprecated in favor of
# 'grep -E' but retained here.
if [ -z "$CEPH_PORT" ]; then
    while [ true ]
    do
        CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
        ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
    done
fi

[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"

# Only wipe old state when starting a fresh cluster, not when adding OSDs.
if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
fi

[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
if [ $inc_osd_num -eq 0 ]; then
    $SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*

# A fresh cephx cluster gets a fresh keyring.
[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn


# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # keep the first IPv4 address that is not localhost (127.0.0.1)
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing left, try using localhost address, it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"


# Allow overriding the 'ceph' CLI binary used by ceph_adm below.
[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
1361
ceph_adm() {
    # Run the ceph CLI against this vstart cluster, attaching the admin
    # keyring only when cephx authentication is enabled.
    local -a cmd=("$CEPH_ADM" -c "$conf_fn")
    if [ "$cephx" -eq 1 ]; then
        cmd+=(-k "$keyring_fn")
    fi
    prun $SUDO "${cmd[@]}" "$@"
}
1369
9f95a23c
TL
1370if [ $inc_osd_num -gt 0 ]; then
1371 start_osd
1372 exit
1373fi
1374
7c673cae
FG
1375if [ "$new" -eq 1 ]; then
1376 prepare_conf
1377fi
1378
1379if [ $CEPH_NUM_MON -gt 0 ]; then
1380 start_mon
11fdf7f2 1381
9f95a23c 1382 debug echo Populating config ...
11fdf7f2
TL
1383 cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
1384[global]
1385osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
1386osd_pool_default_min_size = 1
11fdf7f2
TL
1387
1388[mon]
1389mon_osd_reporter_subtree_level = osd
1390mon_data_avail_warn = 2
1391mon_data_avail_crit = 1
1392mon_allow_pool_delete = true
f67539c2 1393mon_allow_pool_size_one = true
11fdf7f2
TL
1394
1395[osd]
1396osd_scrub_load_threshold = 2000
1397osd_debug_op_order = true
1398osd_debug_misdirected_ops = true
1399osd_copyfrom_max_chunk = 524288
1400
1401[mds]
1402mds_debug_frag = true
1403mds_debug_auth_pins = true
1404mds_debug_subtrees = true
1405
9f95a23c
TL
1406[mgr]
1407mgr/telemetry/nag = false
1408mgr/telemetry/enable = false
1409
11fdf7f2
TL
1410EOF
1411
1412 if [ "$debug" -ne 0 ]; then
9f95a23c
TL
1413 debug echo Setting debug configs ...
1414 cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
11fdf7f2
TL
1415[mgr]
1416debug_ms = 1
1417debug_mgr = 20
1418debug_monc = 20
1419debug_mon = 20
1420
1421[osd]
1422debug_ms = 1
1423debug_osd = 25
1424debug_objecter = 20
1425debug_monc = 20
1426debug_mgrc = 20
1427debug_journal = 20
1428debug_filestore = 20
1429debug_bluestore = 20
1430debug_bluefs = 20
1431debug_rocksdb = 20
1432debug_bdev = 20
1433debug_reserver = 10
1434debug_objclass = 20
1435
1436[mds]
1437debug_ms = 1
1438debug_mds = 20
1439debug_monc = 20
1440debug_mgrc = 20
1441mds_debug_scatterstat = true
1442mds_verify_scatter = true
1443EOF
1444 fi
f6b5b4d7
TL
1445 if [ "$cephadm" -gt 0 ]; then
1446 debug echo Setting mon public_network ...
1447 public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
1448 ceph_adm config set mon public_network $public_network
1449 fi
7c673cae
FG
1450fi
1451
c07f9fc5
FG
1452if [ $CEPH_NUM_MGR -gt 0 ]; then
1453 start_mgr
1454fi
1455
7c673cae
FG
1456# osd
1457if [ $CEPH_NUM_OSD -gt 0 ]; then
1458 start_osd
1459fi
1460
1461# mds
1462if [ "$smallmds" -eq 1 ]; then
1463 wconf <<EOF
1464[mds]
9f95a23c
TL
1465 mds log max segments = 2
1466 # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
1467 mds cache memory limit = 100M
7c673cae
FG
1468EOF
1469fi
1470
1471if [ $CEPH_NUM_MDS -gt 0 ]; then
1472 start_mds
9f95a23c
TL
1473 # key with access to all FS
1474 ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
7c673cae
FG
1475fi
1476
1477# Don't set max_mds until all the daemons are started, otherwise
1478# the intended standbys might end up in active roles.
1479if [ "$CEPH_MAX_MDS" -gt 1 ]; then
1480 sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
1481fi
1482fs=0
1483for name in a b c d e f g h i j k l m n o p
1484do
1485 [ $fs -eq $CEPH_NUM_FS ] && break
1486 fs=$(($fs + 1))
1487 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
f91f0fd5 1488 ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
7c673cae
FG
1489 fi
1490done
1491
1492# mgr
1493
7c673cae
FG
1494if [ "$ec" -eq 1 ]; then
1495 ceph_adm <<EOF
1496osd erasure-code-profile set ec-profile m=2 k=2
9f95a23c 1497osd pool create ec erasure ec-profile
7c673cae
FG
1498EOF
1499fi
1500
do_cache() {
    # For every pool name given, create a companion "<pool>-cache" pool and
    # attach it to the base pool as a writeback cache tier overlay.
    for p in "$@"; do
        debug echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache
1515
do_hitsets() {
    # Arguments come in (pool, hit_set_type) pairs; enable HitSet tracking
    # on each pool with a fixed count of 8 and a 30-second period.
    while [ -n "$*" ]; do
        pool="$1"
        type="$2"
        shift
        shift
        debug echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset
1531
a4b75251
TL
do_rgw_create_bucket()
{
    # Generate a small boto script in $CEPH_OUT_DIR and run it to create
    # the 'nfs-bucket' S3 bucket on the local RGW (uses $s3_akey/$s3_skey
    # set by do_rgw_create_users).
    local rgw_python_file='rgw-create-bucket.py'
    cat > "$CEPH_OUT_DIR/$rgw_python_file" <<EOF
import boto
import boto.s3.connection

conn = boto.connect_s3(
        aws_access_key_id = '$s3_akey',
        aws_secret_access_key = '$s3_skey',
        host = '$HOSTNAME',
        port = 80,
        is_secure=False,
        calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )

bucket = conn.create_bucket('nfs-bucket')
print('created new bucket')
EOF
    prun python $CEPH_OUT_DIR/$rgw_python_file
}
1552
do_rgw_create_users()
{
    # Create the standard vstart RGW accounts: a general S3 user ("testid"),
    # the fixed users expected by the s3-tests suite, and a Swift subuser.
    # All credentials below are well-known development values — never use
    # them outside a local test cluster.

    # Create S3 user
    s3_akey='0555b35654ad1656d804'
    s3_skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $s3_akey --secret $s3_skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    # tenant-scoped user for multi-tenancy tests
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo "  access key:  $s3_akey"
    echo "  secret key:  $s3_skey"
    echo ""
    echo "Swift User Info:"
    echo "  account   : test"
    echo "  user      : tester"
    echo "  password  : testing"
    echo ""
}
7c673cae 1599
224ce89b
WB
do_rgw()
{
    # On a new cluster, create the standard RGW users (and optional zone
    # compression), then launch $CEPH_NUM_RGW radosgw daemons on consecutive
    # ports starting at $CEPH_RGW_PORT.  With cephadm, deployment is
    # delegated to the orchestrator instead.
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            debug echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi
    # Start server
    if [ "$cephadm" -gt 0 ]; then
        ceph_adm orch apply rgw rgwTest
        return
    fi

    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    fi

    # A trailing 's' on the port (e.g. "8000s") selects https.
    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
    RGWSUDO=
    # binding privileged ports requires root
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo

    current_port=$CEPH_RGW_PORT
    # BUGFIX: 'i' was previously used without initialization, so a stray 'i'
    # inherited from the caller's environment could end this loop early (or
    # make the -eq test fail); count explicitly from zero.
    local i=0
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"

        # create a dedicated cephx key for this rgw instance
        ceph_adm auth get-or-create $rgw_name \
            mon 'allow rw' \
            osd 'allow rwx' \
            mgr 'allow rw' \
            >> "$keyring_fn"

        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            --rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
            ${RGWDEBUG} \
            -n ${rgw_name} \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}"

        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break

        current_port=$((current_port+1))
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi

# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    pseudo_path="/cephfs"
    if [ "$cephadm" -gt 0 ]; then
        # with cephadm, let the NFS mgr module deploy ganesha for us
        cluster_id="vstart"
        port="2049"
        prun ceph_adm nfs cluster create $cluster_id
        if [ $CEPH_NUM_MDS -gt 0 ]; then
            prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
        if [ "$CEPH_NUM_RGW" -gt 0 ]; then
            # also export the demo RGW bucket over NFS
            pseudo_path="/rgw"
            do_rgw_create_bucket
            prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
    else
        # bare-process deployment; each daemon gets its own port
        start_ganesha
        echo "Mount using: mount -t nfs -o port=<ganesha-port-num> $IP:$pseudo_path mountpoint"
    fi
fi
f67539c2 1682
docker_service(){
    # Run a container command ("$@") with podman if available, else docker.
    # Used below to deploy the jaeger all-in-one container; removes a
    # previously exited 'jaeger' container before starting a new one.
    local service=''
    #prefer podman
    if command -v podman > /dev/null; then
        service="podman"
    elif pgrep -f docker > /dev/null; then
        service="docker"
    fi
    if [ -n "$service" ]; then
        echo "using $service for deploying jaeger..."
        #check for exited container, remove them and restart container
        if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then
            $service rm jaeger
        fi
        # BUGFIX: this check previously hardcoded 'podman ps' even when
        # docker was the selected runtime; use the chosen $service.
        if [ ! "$($service ps -aq -f name=jaeger)" ]; then
            $service "$@"
        fi
    else
        echo "cannot find docker or podman, please restart service and rerun."
    fi
}
f67539c2
TL
1704
echo ""
# Optionally deploy the jaeger tracing all-in-one container.
if [ $with_jaeger -eq 1 ]; then
    debug echo "Enabling jaegertracing..."
    docker_service run -d --name jaeger \
        -p 5775:5775/udp \
        -p 6831:6831/udp \
        -p 6832:6832/udp \
        -p 5778:5778 \
        -p 16686:16686 \
        -p 14268:14268 \
        -p 14250:14250 \
        quay.io/jaegertracing/all-in-one
fi

debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."

echo ""
# Print the dashboard/restful access details collected by start_mgr.
if [ "$new" -eq 1 ]; then
    if $with_mgr_dashboard; then
        cat <<EOF
dashboard urls: $DASH_URLS
  w/ user/pass: admin / admin
EOF
    fi
    if $with_mgr_restful; then
        cat <<EOF
restful urls: $RESTFUL_URLS
  w/ user/pass: admin / $RESTFUL_SECRET
EOF
    fi
fi

echo ""
# add header to the environment file
{
    echo "#"
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
    echo "#"
} > $CEPH_DIR/vstart_environment.sh
# then the exports themselves, echoed to the terminal as well via tee
{
    echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
    echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
    echo "export PATH=$CEPH_DIR/bin:\$PATH"
    echo "export CEPH_CONF=$conf_fn"
    echo "export CEPH_KEYRING=$keyring_fn"

    if [ -n "$CEPHFS_SHELL" ]; then
        echo "alias cephfs-shell=$CEPHFS_SHELL"
    fi
} | tee -a $CEPH_DIR/vstart_environment.sh

echo "CEPH_DEV=1"

# always keep this section at the very bottom of this file
# Warn if a system-wide /etc/ceph/ceph.conf could shadow the vstart conf.
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
    echo ""
    echo ""
    echo "WARNING:"
    echo "    Please remove stray $STRAY_CONF_PATH if not needed."
    echo "    Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "    and may lead to undesired results."
    echo ""
    echo "NOTE:"
    echo "    Remember to restart cluster after removing $STRAY_CONF_PATH"
fi

init_logrotate