#!/usr/bin/env bash
# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab

# abort on failure
set -e

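# Small output helpers used throughout this script (summary comment added
# for readability): quoted_print echoes a command with shell-safe quoting,
# debug sends its output to stderr, and prun/prunb run a command (prunb in
# the background) with $CEPH_BIN prepended to PATH so freshly built
# binaries are picked up first. Illustrative use:
#   prun "$CEPH_BIN/ceph-authtool" --create-keyring "$keyring_fn"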
quoted_print() {
    for s in "$@"; do
        if [[ "$s" =~ \ ]]; then
            printf -- "'%s' " "$s"
        else
            printf -- "$s "
        fi
    done
    printf '\n'
}

debug() {
    "$@" >&2
}

prunb() {
    debug quoted_print "$@" '&'
    PATH=$CEPH_BIN:$PATH "$@" &
}

prun() {
    debug quoted_print "$@"
    PATH=$CEPH_BIN:$PATH "$@"
}


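# When VSTART_DEST is set, all generated cluster state (ceph.conf, keyring,
# dev/, out/ and asok/ directories) is placed under that directory instead
# of the current build directory; binaries still come from $PWD/bin unless
# CEPH_BIN/CEPH_LIB are set. (Comment added for clarity.)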
if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`dirname $0`
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${CEPH_BIN:-${PWD}/bin}
    CEPH_LIB=${CEPH_LIB:-${PWD}/lib}

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/asok
    CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}
fi

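# get_cmake_variable reads a cached value out of CMakeCache.txt in the
# current (build) directory. Illustrative use from an out-of-tree build dir:
#   get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND   # prints ON or OFF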
get_cmake_variable() {
    local variable=$1
    grep "${variable}:" CMakeCache.txt | cut -d "=" -f 2
}

# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    CEPH_BUILD_DIR=`pwd`
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_ROOT/external/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
    # make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
    [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/shell/cephfs-shell
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_DIR/external/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
    [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
fi

if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi

[ -z "$PYBIND" ] && PYBIND=./pybind

[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH

export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
# Suppress the log message that indicates we are running a development
# version. vstart.sh is only used during testing and development.
export CEPH_DEV=1

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"

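# Daemon counts can be set with the short environment variables above
# (MON, OSD, MDS, MGR, FS, RGW, NFS) or their CEPH_NUM_*/GANESHA_DAEMON_NUM
# equivalents. Illustrative invocation from a build directory:
#   MON=1 OSD=1 MDS=0 MGR=1 ../src/vstart.sh -n -d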
# if none of the CEPH_NUM_* numbers is specified, kill the existing
# cluster.
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" -a \
     -z "$GANESHA_DAEMON_NUM" ]; then
    kill_all=1
else
    kill_all=0
fi

[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_ASOK_DIR" ] && CEPH_ASOK_DIR="$CEPH_DIR/asok"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}

if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi

extra_conf=""
new=0
standby=0
debug=0
trace=0
ip=""
nodaemon=0
redirect=0
smallmds=0
short=0
crimson=0
ec=0
cephadm=0
parallel=true
restart=1
hitset=""
overwrite_conf=0
cephx=1 # turn cephx on by default
gssapi_authx=0
cache=""
if [ `uname` = FreeBSD ]; then
    objectstore="memstore"
else
    objectstore="bluestore"
fi
ceph_osd=ceph-osd
rgw_frontend="beast"
rgw_compression=""
lockdep=${LOCKDEP:-1}
spdk_enabled=0 # disable SPDK by default
pmem_enabled=0
zoned_enabled=0
io_uring_enabled=0
with_jaeger=0

with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
   [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi
with_mgr_restful=false

kstore_path=
declare -a block_devs
declare -a secondary_block_devs
secondary_block_devs_type="SSD"

VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
monmap_fn="/tmp/ceph_monmap.$$"
inc_osd_num=0

msgr="21"

read -r -d '' usage <<EOF || true
usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d
options:
    -d, --debug
    -t, --trace
    -s, --standby_mds: Generate standby-replay MDS for each active
    -l, --localhost: use localhost instead of hostname
    -i <ip>: bind to specific ip
    -n, --new
    --valgrind[_{osd,mds,mon,rgw}] 'toolname args...'
    --nodaemon: use ceph-run as wrapper for mon/osd/mds
    --redirect-output: only useful with nodaemon, directs output to log file
    --smallmds: limit mds cache memory limit
    -m ip:port specify monitor address
    -k keep old configuration files (default)
    -x enable cephx (on by default)
    -X disable cephx
    -g --gssapi enable Kerberos/GSSAPI authentication
    -G disable Kerberos/GSSAPI authentication
    --hitset <pool> <hit_set_type>: enable hitset tracking
    -e : create an erasure pool
    -o config add extra config parameters to all sections
    --rgw_port specify ceph rgw http listen port
    --rgw_frontend specify the rgw frontend configuration
    --rgw_arrow_flight start arrow flight frontend
    --rgw_compression specify the rgw compression plugin
    --seastore use seastore as crimson osd backend
    -b, --bluestore use bluestore as the osd objectstore backend (default)
    -K, --kstore use kstore as the osd objectstore backend
    --cyanstore use cyanstore as the osd objectstore backend
    --memstore use memstore as the osd objectstore backend
    --cache <pool>: enable cache tiering on pool
    --short: short object names only; necessary for ext4 dev
    --nolockdep disable lockdep
    --multimds <count> allow multimds with maximum active count
    --without-dashboard: do not run using mgr dashboard
    --bluestore-spdk: enable SPDK, takes a comma-delimited list of PCI IDs of NVMe devices (e.g., 0000:81:00.0)
    --bluestore-pmem: enable PMEM, takes the path to a file mapped to PMEM
    --msgr1: use msgr1 only
    --msgr2: use msgr2 only
    --msgr21: use msgr2 and msgr1
    --crimson: use crimson-osd instead of ceph-osd
    --crimson-foreground: use crimson-osd, but run it in the foreground
    --osd-args: specify any extra osd specific options
    --bluestore-devs: comma-separated list of blockdevs to use for bluestore
    --bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)
    --bluestore-io-uring: enable io_uring backend
    --inc-osd: append some more osds into existing vcluster
    --cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]
    --no-parallel: don't start all OSDs in parallel
    --no-restart: don't restart process when using ceph-run
    --jaeger: use jaegertracing for tracing
    --seastore-device-size: set total size of seastore
    --seastore-devs: comma-separated list of blockdevs to use for seastore
    --seastore-secondary-devs: comma-separated list of secondary blockdevs to use for seastore
    --seastore-secondary-devs-type: device type of all secondary blockdevs. HDD, SSD(default), ZNS or RANDOM_BLOCK_SSD
    --crimson-smp: number of cores to use for crimson
\n
EOF

usage_exit() {
    printf "$usage"
    exit
}

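# parse_block_devs/parse_secondary_devs split a comma-separated device list
# into the corresponding array and verify each entry is a writable block
# device before continuing. Illustrative use (hypothetical devices):
#   parse_block_devs --bluestore-devs "/dev/vdb,/dev/vdc"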
parse_block_devs() {
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a block_devs <<< "$devs"
    for dev in "${block_devs[@]}"; do
        if [ ! -b $dev ] || [ ! -w $dev ]; then
            echo "All $opt_name must refer to writable block devices"
            exit 1
        fi
    done
}

parse_secondary_devs() {
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a secondary_block_devs <<< "$devs"
    for dev in "${secondary_block_devs[@]}"; do
        if [ ! -b $dev ] || [ ! -w $dev ]; then
            echo "All $opt_name must refer to writable block devices"
            exit 1
        fi
    done
}

crimson_smp=1
while [ $# -ge 1 ]; do
case $1 in
    -d | --debug)
        debug=1
        ;;
    -t | --trace)
        trace=1
        ;;
    -s | --standby_mds)
        standby=1
        ;;
    -l | --localhost)
        ip="127.0.0.1"
        ;;
    -i)
        [ -z "$2" ] && usage_exit
        ip="$2"
        shift
        ;;
    -e)
        ec=1
        ;;
    --new | -n)
        new=1
        ;;
    --inc-osd)
        new=0
        kill_all=0
        inc_osd_num=$2
        if [ "$inc_osd_num" == "" ]; then
            inc_osd_num=1
        else
            shift
        fi
        ;;
    --short)
        short=1
        ;;
    --crimson)
        crimson=1
        ceph_osd=crimson-osd
        nodaemon=1
        msgr=2
        ;;
    --crimson-foreground)
        crimson=1
        ceph_osd=crimson-osd
        nodaemon=0
        msgr=2
        ;;
    --osd-args)
        extra_osd_args="$2"
        shift
        ;;
    --msgr1)
        msgr="1"
        ;;
    --msgr2)
        msgr="2"
        ;;
    --msgr21)
        msgr="21"
        ;;
    --cephadm)
        cephadm=1
        ;;
    --no-parallel)
        parallel=false
        ;;
    --no-restart)
        restart=0
        ;;
    --valgrind)
        [ -z "$2" ] && usage_exit
        valgrind=$2
        shift
        ;;
    --valgrind_args)
        valgrind_args="$2"
        shift
        ;;
    --valgrind_mds)
        [ -z "$2" ] && usage_exit
        valgrind_mds=$2
        shift
        ;;
    --valgrind_osd)
        [ -z "$2" ] && usage_exit
        valgrind_osd=$2
        shift
        ;;
    --valgrind_mon)
        [ -z "$2" ] && usage_exit
        valgrind_mon=$2
        shift
        ;;
    --valgrind_mgr)
        [ -z "$2" ] && usage_exit
        valgrind_mgr=$2
        shift
        ;;
    --valgrind_rgw)
        [ -z "$2" ] && usage_exit
        valgrind_rgw=$2
        shift
        ;;
    --nodaemon)
        nodaemon=1
        ;;
    --redirect-output)
        redirect=1
        ;;
    --smallmds)
        smallmds=1
        ;;
    --rgw_port)
        CEPH_RGW_PORT=$2
        shift
        ;;
    --rgw_frontend)
        rgw_frontend=$2
        shift
        ;;
    --rgw_arrow_flight)
        rgw_flight_frontend="yes"
        ;;
    --rgw_compression)
        rgw_compression=$2
        shift
        ;;
    --kstore_path)
        kstore_path=$2
        shift
        ;;
    -m)
        [ -z "$2" ] && usage_exit
        MON_ADDR=$2
        shift
        ;;
    -x)
        cephx=1 # this is on by default, flag exists for historical consistency
        ;;
    -X)
        cephx=0
        ;;

    -g | --gssapi)
        gssapi_authx=1
        ;;
    -G)
        gssapi_authx=0
        ;;

    -k)
        if [ ! -r $conf_fn ]; then
            echo "cannot use old configuration: $conf_fn not readable." >&2
            exit
        fi
        new=0
        ;;
    --memstore)
        objectstore="memstore"
        ;;
    --cyanstore)
        objectstore="cyanstore"
        ;;
    --seastore)
        objectstore="seastore"
        ;;
    -b | --bluestore)
        objectstore="bluestore"
        ;;
    -K | --kstore)
        objectstore="kstore"
        ;;
    --hitset)
        hitset="$hitset $2 $3"
        shift
        shift
        ;;
    -o)
        extra_conf+=$'\n'"$2"
        shift
        ;;
    --cache)
        if [ -z "$cache" ]; then
            cache="$2"
        else
            cache="$cache $2"
        fi
        shift
        ;;
    --nolockdep)
        lockdep=0
        ;;
    --multimds)
        CEPH_MAX_MDS="$2"
        shift
        ;;
    --without-dashboard)
        with_mgr_dashboard=false
        ;;
    --with-restful)
        with_mgr_restful=true
        ;;
    --seastore-device-size)
        seastore_size="$2"
        shift
        ;;
    --seastore-devs)
        parse_block_devs --seastore-devs "$2"
        shift
        ;;
    --seastore-secondary-devs)
        parse_secondary_devs --seastore-secondary-devs "$2"
        shift
        ;;
    --seastore-secondary-devs-type)
        secondary_block_devs_type="$2"
        shift
        ;;
    --crimson-smp)
        crimson_smp=$2
        shift
        ;;
    --bluestore-spdk)
        [ -z "$2" ] && usage_exit
        IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
        spdk_enabled=1
        shift
        ;;
    --bluestore-pmem)
        [ -z "$2" ] && usage_exit
        bluestore_pmem_file="$2"
        pmem_enabled=1
        shift
        ;;
    --bluestore-devs)
        parse_block_devs --bluestore-devs "$2"
        shift
        ;;
    --bluestore-zoned)
        zoned_enabled=1
        ;;
    --bluestore-io-uring)
        io_uring_enabled=1
        shift
        ;;
    --jaeger)
        with_jaeger=1
        echo "with_jaeger $with_jaeger"
        ;;
    *)
        usage_exit
esac
shift
done

if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi

if [ "$new" -eq 0 ]; then
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
    fi
    mkdir -p $CEPH_ASOK_DIR
    MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
        CEPH_NUM_MON="$MON"
    OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
        CEPH_NUM_OSD="$OSD"
    MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
        CEPH_NUM_MDS="$MDS"
    MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
        CEPH_NUM_MGR="$MGR"
    RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
        CEPH_NUM_RGW="$RGW"
    NFS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
        GANESHA_DAEMON_NUM="$NFS"
else
    # only delete if -n
    if [ -e "$conf_fn" ]; then
        asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
        rm -- "$conf_fn"
        if [ $asok_dir != /var/run/ceph ]; then
            [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
        fi
    fi
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
    fi
fi

ARGS="-c $conf_fn"

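# run <type> <id> <command...> starts one daemon: under valgrind when a
# --valgrind* option was given, directly via prun when daemonizing, or
# wrapped in ceph-run (optionally with output redirected to
# $CEPH_OUT_DIR/<type>.<id>.stdout) when --nodaemon is in effect.
# (Comment added for clarity.)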
run() {
    type=$1
    shift
    num=$1
    shift
    eval "valg=\$valgrind_$type"
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    else
        if [ "$nodaemon" -eq 0 ]; then
            prun "$@"
        else
            if [ "$restart" -eq 0 ]; then
                set -- '--no-restart' "$@"
            fi
            if [ "$redirect" -eq 0 ]; then
                prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
            else
                ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
            fi
        fi
    fi
}

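# wconf appends its stdin to the generated ceph.conf, but only when a new
# cluster is being created (-n) or overwrite_conf is set; otherwise the
# existing configuration is left untouched. Illustrative use (hypothetical
# section name):
#   wconf <<EOF
# [client.example]
#         keyring = $keyring_fn
# EOF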
wconf() {
    if [ "$new" -eq 1 -o "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}


do_rgw_conf() {

    if [ $CEPH_NUM_RGW -eq 0 ]; then
        return 0
    fi

    # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
    # individual rgw's ids will be their ports.
    current_port=$CEPH_RGW_PORT
    # allow only first rgw to start arrow_flight server/port
    local flight_conf=$rgw_flight_frontend
    for n in $(seq 1 $CEPH_NUM_RGW); do
        wconf << EOF
[client.rgw.${current_port}]
        rgw frontends = $rgw_frontend port=${current_port}${flight_conf:+,arrow_flight}
        admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
        debug rgw_flight = 20
EOF
        current_port=$((current_port + 1))
        unset flight_conf
    done

}

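# format_conf re-indents a multi-line option string (e.g. msgr_conf or
# extra_conf) so that every line after the first lines up under the
# 8-space column used inside the generated ceph.conf sections.
# (Comment added for clarity.)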
format_conf() {
    local opts=$1
    local indent="        "
    local opt
    local formatted
    while read -r opt; do
        if [ -z "$formatted" ]; then
            formatted="${opt}"
        else
            formatted+=$'\n'${indent}${opt}
        fi
    done <<< "$opts"
    echo "$formatted"
}

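# prepare_conf writes the initial ceph.conf for a new cluster: the
# [client.vstart.sh] bookkeeping section, [global] defaults, the selected
# auth mode, objectstore-specific options and per-daemon sections. It only
# has an effect when wconf is allowed to write (new cluster or
# overwrite_conf). (Comment added for clarity.)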
prepare_conf() {
    local DAEMONOPTS="
        log file = $CEPH_OUT_DIR/\$name.log
        admin socket = $CEPH_ASOK_DIR/\$name.asok
        chdir = \"\"
        pid file = $CEPH_OUT_DIR/\$name.pid
        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    local mgr_modules="iostat nfs"
    if $with_mgr_dashboard; then
        mgr_modules+=" dashboard"
    fi
    if $with_mgr_restful; then
        mgr_modules+=" restful"
    fi

    local msgr_conf=''
    if [ $msgr -eq 21 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = true"
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="ms bind msgr2 = true
                   ms bind msgr1 = false"
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="ms bind msgr2 = false
                   ms bind msgr1 = true"
    fi

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
        num mon = $CEPH_NUM_MON
        num osd = $CEPH_NUM_OSD
        num mds = $CEPH_NUM_MDS
        num mgr = $CEPH_NUM_MGR
        num rgw = $CEPH_NUM_RGW
        num ganesha = $GANESHA_DAEMON_NUM

[global]
        fsid = $(uuidgen)
        osd failsafe full ratio = .99
        mon osd full ratio = .99
        mon osd nearfull ratio = .99
        mon osd backfillfull ratio = .99
        mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
        erasure code dir = $EC_PATH
        plugin dir = $CEPH_LIB
        run dir = $CEPH_OUT_DIR
        crash dir = $CEPH_OUT_DIR
        enable experimental unrecoverable data corrupting features = *
        osd_crush_chooseleaf_type = 0
        debug asok assert abort = true
        $(format_conf "${msgr_conf}")
        $(format_conf "${extra_conf}")
        $AUTOSCALER_OPTS
EOF
    if [ "$with_jaeger" -eq 1 ] ; then
        wconf <<EOF
        jaeger_agent_port = 6831
EOF
    fi
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
        lockdep = true
EOF
    fi
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = cephx
        auth service required = cephx
        auth client required = cephx
EOF
    elif [ "$gssapi_authx" -eq 1 ] ; then
        wconf <<EOF
        auth cluster required = gss
        auth service required = gss
        auth client required = gss
        gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
    else
        wconf <<EOF
        auth cluster required = none
        auth service required = none
        auth client required = none
        ms mon client mode = crc
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT="        osd max object name len = 460
        osd max object namespace len = 64"
    fi
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ] || [ "$pmem_enabled" -eq 1 ]; then
            BLUESTORE_OPTS="        bluestore_block_db_path = \"\"
        bluestore_block_db_size = 0
        bluestore_block_db_create = false
        bluestore_block_wal_path = \"\"
        bluestore_block_wal_size = 0
        bluestore_block_wal_create = false
        bluestore_spdk_mem = 2048"
        else
            BLUESTORE_OPTS="        bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
        bluestore block db size = 1073741824
        bluestore block db create = true
        bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
        bluestore block wal size = 1048576000
        bluestore block wal create = true"
        fi
        if [ "$zoned_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bluestore min alloc size = 65536
        bluestore prefer deferred size = 0
        bluestore prefer deferred size hdd = 0
        bluestore prefer deferred size ssd = 0
        bluestore allocator = zoned"
        fi
        if [ "$io_uring_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
        bdev ioring = true"
        fi
    fi

    if [ "$objectstore" == "seastore" ]; then
        if [[ ${seastore_size+x} ]]; then
            SEASTORE_OPTS="
        seastore device size = $seastore_size"
        fi
    fi

    wconf <<EOF
[client]
$CCLIENTDEBUG
        keyring = $keyring_fn
        log file = $CEPH_OUT_CLIENT_DIR/\$name.\$pid.log
        admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok

        ; needed for s3tests
        rgw crypt s3 kms backend = testing
        rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        rgw crypt require ssl = false
        ; uncomment the following to set LC days as the value in seconds;
        ; needed for passing lc time based s3-tests (can be verbose)
        ; rgw lc debug interval = 10
        $(format_conf "${extra_conf}")
EOF
    do_rgw_conf
    wconf << EOF
[mds]
$CMDSDEBUG
$DAEMONOPTS
        mds data = $CEPH_DEV_DIR/mds.\$id
        mds root ino uid = `id -u`
        mds root ino gid = `id -g`
        $(format_conf "${extra_conf}")
[mgr]
        mgr disabled modules = rook
        mgr data = $CEPH_DEV_DIR/mgr.\$id
        mgr module path = $MGR_PYTHON_PATH
        cephadm path = $CEPH_BIN/cephadm
$DAEMONOPTS
        $(format_conf "${extra_conf}")
[osd]
$DAEMONOPTS
        osd_check_max_object_name_len_on_startup = false
        osd data = $CEPH_DEV_DIR/osd\$id
        osd journal = $CEPH_DEV_DIR/osd\$id/journal
        osd journal size = 100
        osd class tmp = out
        osd class dir = $OBJCLASS_PATH
        osd class load list = *
        osd class default list = *
        osd fast shutdown = false

        bluestore fsck on mount = true
        bluestore block create = true
$BLUESTORE_OPTS

        ; kstore
        kstore fsck on mount = true
        osd objectstore = $objectstore
$SEASTORE_OPTS
$COSDSHORT
        $(format_conf "${extra_conf}")
[mon]
        mon_data_avail_crit = 1
        mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
        $(format_conf "${extra_conf}")
        mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
        auth allow insecure global id reclaim = false
EOF

    if [ "$crimson" -eq 1 ]; then
        wconf <<EOF
        osd pool default crimson = true
EOF
    fi
}

write_logrotate_conf() {
    out_dir=$(pwd)"/out/*.log"

    cat << EOF
$out_dir
{
    rotate 5
    size 1G
    copytruncate
    compress
    notifempty
    missingok
    sharedscripts
    postrotate
        # NOTE: assuring that the absence of one of the following processes
        # won't abort the logrotate command.
        killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
    endscript
}
EOF
}

init_logrotate() {
    logrotate_conf_path=$(pwd)"/logrotate.conf"
    logrotate_state_path=$(pwd)"/logrotate.state"

    if ! test -a $logrotate_conf_path; then
        if test -a $logrotate_state_path; then
            rm -f $logrotate_state_path
        fi
        write_logrotate_conf > $logrotate_conf_path
    fi
}

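# start_mon picks monitor ids (a, b, c, ...) and, for a new cluster, creates
# the initial keyring and monmap, runs mkfs on each monitor's data directory
# and then starts the ceph-mon daemons. Monitor addresses are derived from
# $IP and $CEPH_PORT according to the selected msgr mode.
# (Comment added for clarity.)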
start_mon() {
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ]; then
            MONS="$f"
        else
            MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        if [ `echo $IP | grep '^127\\.'` ]; then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo "  connect.  either adjust /etc/hosts, or edit this script to use your"
            echo "  machine's real IP."
            echo
        fi

        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local params=()
        local count=0
        local mon_host=""
        for f in $MONS
        do
            if [ $msgr -eq 1 ]; then
                A="v1:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 2 ]; then
                A="v2:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 21 ]; then
                A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
            fi
            params+=("--addv" "$f" "$A")
            mon_host="$mon_host $A"
            wconf <<EOF
[mon.$f]
        host = $HOSTNAME
        mon data = $CEPH_DEV_DIR/mon.$f
EOF
            count=$(($count + 2))
        done
        wconf <<EOF
[global]
        mon host = $mon_host
EOF
        prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"

        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done

        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done

    if [ "$crimson" -eq 1 ]; then
        $CEPH_BIN/ceph osd set-allow-crimson --yes-i-really-mean-it
    fi
}

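# start_osd provisions and starts each OSD (or only the OSDs being appended
# when --inc-osd is used): for a new cluster it wipes and recreates the osd
# data dir, optionally links in block devices, registers the OSD with
# 'ceph osd new' and runs mkfs before launching ceph-osd/crimson-osd.
# With crimson, each OSD is pinned to a disjoint --cpuset of crimson_smp CPUs.
# (Comment added for clarity.)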
start_osd() {
    if [ $inc_osd_num -gt 0 ]; then
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        start=$old_maxosd
        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf
    else
        start=0
        end=$(($CEPH_NUM_OSD-1))
    fi
    local osds_wait
    for osd in `seq $start $end`
    do
        local extra_seastar_args
        if [ "$ceph_osd" == "crimson-osd" ]; then
            bottom_cpu=$(( osd * crimson_smp ))
            top_cpu=$(( bottom_cpu + crimson_smp - 1 ))
            # pin each osd to its own range of CPUs
            extra_seastar_args="--cpuset $bottom_cpu-$top_cpu"
            if [ "$debug" -ne 0 ]; then
                extra_seastar_args+=" --debug"
            fi
            if [ "$trace" -ne 0 ]; then
                extra_seastar_args+=" --trace"
            fi
        fi
        if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
            wconf <<EOF
[osd.$osd]
        host = $HOSTNAME
EOF
            if [ "$spdk_enabled" -eq 1 ]; then
                wconf <<EOF
        bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
EOF
            elif [ "$pmem_enabled" -eq 1 ]; then
                wconf <<EOF
        bluestore_block_path = ${bluestore_pmem_file}
EOF
            fi
            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
            if [ -n "$kstore_path" ]; then
                ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
            else
                mkdir -p $CEPH_DEV_DIR/osd$osd
                if [ -n "${block_devs[$osd]}" ]; then
                    dd if=/dev/zero of=${block_devs[$osd]} bs=1M count=1
                    ln -s ${block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block
                fi
                if [ -n "${secondary_block_devs[$osd]}" ]; then
                    dd if=/dev/zero of=${secondary_block_devs[$osd]} bs=1M count=1
                    mkdir -p $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1
                    ln -s ${secondary_block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1/block
                fi
            fi
            if [ "$objectstore" == "bluestore" ]; then
                wconf <<EOF
        bluestore fsck on mount = false
EOF
            fi

            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            prun $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args \
                2>&1 | tee $CEPH_OUT_DIR/osd-mkfs.$osd.log

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            cat > $key_fn<<EOF
[osd.$osd]
        key = $OSD_SECRET
EOF
        fi
        echo start osd.$osd
        local osd_pid
        echo 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &
        osd_pid=$!
        if $parallel; then
            osds_wait=$osd_pid
        else
            wait $osd_pid
        fi
    done
    if $parallel; then
        for p in $osds_wait; do
            wait $p
        done
        debug echo OSDs started
    fi
    if [ $inc_osd_num -gt 0 ]; then
        # update num osd
        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
    fi
}

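# create_mgr_restful_secret and start_mgr bring up the ceph-mgr daemons.
# For a new cluster, start_mgr also assigns per-mgr dashboard, prometheus
# and restful ports (starting near $CEPH_PORT + 1000), creates the
# dashboard admin user (password "admin"), and optionally bootstraps the
# cephadm orchestrator. (Comment added for clarity.)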
create_mgr_restful_secret() {
    while ! ceph_adm -h | grep -c -q ^restful ; do
        debug echo 'waiting for mgr restful module to start'
        sleep 1
    done
    local secret_file
    if ceph_adm restful create-self-signed-cert > /dev/null; then
        secret_file=`mktemp`
        ceph_adm restful create-key admin -o $secret_file
        RESTFUL_SECRET=`cat $secret_file`
        rm $secret_file
    else
        debug echo MGR Restful is not working, perhaps the package is not installed?
    fi
}

start_mgr() {
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

            wconf <<EOF
[mgr.$name]
        host = $HOSTNAME
EOF

            if $with_mgr_dashboard ; then
                local port_option="ssl_server_port"
                local http_proto="https"
                if [ "$ssl" == "0" ]; then
                    port_option="server_port"
                    http_proto="http"
                    ceph_adm config set mgr mgr/dashboard/ssl false --force
                fi
                ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
                if [ $mgr -eq 1 ]; then
                    DASH_URLS="$http_proto://$IP:$MGR_PORT"
                else
                    DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
                fi
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
            ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
            PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

            ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
        fi

        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    while ! ceph_adm mgr stat | jq -e '.available'; do
        debug echo 'waiting for mgr to become available'
        sleep 1
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            while ! ceph_adm -h | grep -c -q ^dashboard ; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
            printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
            ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
                administrator --force-password
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert; then
                    debug echo dashboard module not working correctly!
                fi
            fi
        fi
        if $with_mgr_restful; then
            create_mgr_restful_secret
        fi
    fi

    if [ "$cephadm" -eq 1 ]; then
        debug echo Enabling cephadm orchestrator
        if [ "$new" -eq 1 ]; then
            digest=$(curl -s \
                https://hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
                | jq -r '.images[0].digest')
            ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
        fi
        ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
        ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
        ceph_adm mgr module enable cephadm
        ceph_adm orch set backend cephadm
        ceph_adm orch host add "$(hostname)"
        ceph_adm orch apply crash '*'
        ceph_adm config set mgr mgr/cephadm/allow_ptrace true
    fi
}

start_mds() {
    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
        host = $HOSTNAME
EOF
            if [ "$standby" -eq 1 ]; then
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
        mds standby for rank = $mds
[mds.${name}s]
        mds standby replay = true
        mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m  #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done

    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

            # wait for volume module to load
            while ! ceph_adm fs volume ls ; do sleep 1 ; done
            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                ceph_adm fs volume create ${name}
                ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi

}

# Ganesha daemons require the nfs-ganesha, nfs-ganesha-ceph,
# nfs-ganesha-rados-grace and nfs-ganesha-rados-urls (version 3.3 and above)
# packages to be installed. On Fedora>=31 these packages can be installed
# directly with 'dnf'. For CentOS>=8 the packages are available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha

start_ganesha() {
    cluster_id="vstart"
    GANESHA_PORT=$(($CEPH_PORT + 4000))
    local ganesha=0
    test_user="$cluster_id"
    pool_name=".nfs"
    namespace=$cluster_id
    url="rados://$pool_name/$namespace/conf-nfs.$test_user"

    prun ceph_adm auth get-or-create client.$test_user \
        mon "allow r" \
        osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
        mds "allow rw path=/" \
        >> "$keyring_fn"

    ceph_adm mgr module enable test_orchestrator
    ceph_adm orch set backend test_orchestrator
    ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
    prun ceph_adm nfs cluster create $cluster_id
    prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"

    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break

        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir

        echo "NFS_CORE_PARAM {
            Enable_NLM = false;
            Enable_RQUOTA = false;
            Protocols = 4;
            NFS_Port = $port;
        }

        MDCACHE {
            Dir_Chunk = 0;
        }

        NFSv4 {
            RecoveryBackend = rados_cluster;
            Minor_Versions = 1, 2;
        }

        RADOS_KV {
            pool = '$pool_name';
            namespace = $namespace;
            UserId = $test_user;
            nodeid = $name;
        }

        RADOS_URLS {
            Userid = $test_user;
            watch_url = '$url';
        }

        %url $url" > "$ganesha_dir/ganesha-$name.conf"
        wconf <<EOF
[ganesha.$name]
        host = $HOSTNAME
        ip = $IP
        port = $port
        ganesha data = $ganesha_dir
        pid file = $CEPH_OUT_DIR/ganesha-$name.pid
EOF

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG

        # Wait a few seconds for the grace period to be removed
        sleep 2

        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace

        echo "$test_user ganesha daemon $name started on port: $port"
    done
}

if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
        debug mon = 10
        debug ms = 1'
    CCLIENTDEBUG=''
    CMDSDEBUG=''
else
    debug echo "** going verbose **"
    CMONDEBUG='
        debug osd = 20
        debug mon = 20
        debug osd = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1'
    CCLIENTDEBUG='
        debug client = 20'
    CMDSDEBUG='
        debug mds = 20'
fi

# Crimson doesn't support PG merge/split yet.
if [ "$ceph_osd" == "crimson-osd" ]; then
    AUTOSCALER_OPTS='
        osd_pool_default_pg_autoscale_mode = off'
fi

if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi

if [ -z "$CEPH_PORT" ]; then
    while [ true ]
    do
        CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
        ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
    done
fi

[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph

# sudo if btrfs
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"

if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
fi

[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
[ -d $CEPH_OUT_CLIENT_DIR ] || mkdir -p $CEPH_OUT_CLIENT_DIR
if [ $inc_osd_num -eq 0 ]; then
    $SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*

[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn


# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # pick the first IPv4 address that is not the localhost address
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing left, try using localhost address, it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"


[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph

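# ceph_adm wraps the ceph CLI with this cluster's conf file and (when cephx
# is enabled) its keyring, so helper functions can issue admin commands.
# Illustrative use:
#   ceph_adm osd pool ls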
ceph_adm() {
    if [ "$cephx" -eq 1 ]; then
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
    else
        prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
    fi
}

if [ $inc_osd_num -gt 0 ]; then
    start_osd
    exit
fi

if [ "$new" -eq 1 ]; then
    prepare_conf
fi

if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon

    debug echo Populating config ...
    cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[global]
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1

[mon]
mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
mon_allow_pool_size_one = true

[osd]
osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288

[mds]
mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true

[mgr]
mgr/telemetry/nag = false
mgr/telemetry/enable = false

EOF

    if [ "$debug" -ne 0 ]; then
        debug echo Setting debug configs ...
        cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[mgr]
debug_ms = 1
debug_mgr = 20
debug_monc = 20
debug_mon = 20

[osd]
debug_ms = 1
debug_osd = 25
debug_objecter = 20
debug_monc = 20
debug_mgrc = 20
debug_journal = 20
debug_bluestore = 20
debug_bluefs = 20
debug_rocksdb = 20
debug_bdev = 20
debug_reserver = 10
debug_objclass = 20

[mds]
debug_ms = 1
debug_mds = 20
debug_monc = 20
debug_mgrc = 20
mds_debug_scatterstat = true
mds_verify_scatter = true
EOF
    fi
    if [ "$cephadm" -gt 0 ]; then
        debug echo Setting mon public_network ...
        public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
        ceph_adm config set mon public_network $public_network
    fi
fi

if [ "$ceph_osd" == "crimson-osd" ]; then
    $CEPH_BIN/ceph -c $conf_fn config set osd crimson_seastar_smp $crimson_smp
fi

if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi

# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi

# mds
if [ "$smallmds" -eq 1 ]; then
    wconf <<EOF
[mds]
        mds log max segments = 2
        # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
        mds cache memory limit = 100M
EOF
fi

if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
    # key with access to all FS
    ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
fi

# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
    fi
done

# mgr

if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec erasure ec-profile
EOF
fi

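# do_cache adds a writeback cache tier pool "<pool>-cache" for every pool
# named via --cache; do_hitsets applies the <pool> <hit_set_type> pairs
# collected from --hitset. Illustrative use:
#   do_cache rbd
#   do_hitsets rbd bloom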
do_cache() {
    while [ -n "$*" ]; do
        p="$1"
        shift
        debug echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache

do_hitsets() {
    while [ -n "$*" ]; do
        pool="$1"
        type="$2"
        shift
        shift
        debug echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset

do_rgw_create_bucket()
{
    # Create RGW Bucket
    local rgw_python_file='rgw-create-bucket.py'
    echo "import boto
import boto.s3.connection

conn = boto.connect_s3(
        aws_access_key_id = '$s3_akey',
        aws_secret_access_key = '$s3_skey',
        host = '$HOSTNAME',
        port = 80,
        is_secure=False,
        calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )

bucket = conn.create_bucket('nfs-bucket')
print('created new bucket')" > "$CEPH_OUT_DIR/$rgw_python_file"
    prun python $CEPH_OUT_DIR/$rgw_python_file
}

do_rgw_create_users()
{
    # Create S3 user
    s3_akey='0555b35654ad1656d804'
    s3_skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $s3_akey --secret $s3_skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null

    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email s3@example.com --caps="user-policy=*" -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email john.doe@example.com -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email tenanteduser@example.com -c $conf_fn > /dev/null

    # Create Swift user
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null

    echo ""
    echo "S3 User Info:"
    echo "  access key:  $s3_akey"
    echo "  secret key:  $s3_skey"
    echo ""
    echo "Swift User Info:"
    echo "  account   : test"
    echo "  user      : tester"
    echo "  password  : testing"
    echo ""
}

do_rgw()
{
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            debug echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi

    if [ -n "$rgw_flight_frontend" ] ;then
        debug echo "starting arrow_flight frontend on first rgw"
    fi

    # Start server
    if [ "$cephadm" -gt 0 ]; then
        ceph_adm orch apply rgw rgwTest
        return
    fi

    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    fi

    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
    RGWSUDO=
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo

    current_port=$CEPH_RGW_PORT
    # allow only first rgw to start arrow_flight server/port
    local flight_conf=$rgw_flight_frontend
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"

        ceph_adm auth get-or-create $rgw_name \
            mon 'allow rw' \
            osd 'allow rwx' \
            mgr 'allow rw' \
            >> "$keyring_fn"

        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            --rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
            ${RGWDEBUG} \
            -n ${rgw_name} \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}${flight_conf:+,arrow_flight}"

        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break

        current_port=$((current_port+1))
        unset flight_conf
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi

# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    pseudo_path="/cephfs"
    if [ "$cephadm" -gt 0 ]; then
        cluster_id="vstart"
        port="2049"
        prun ceph_adm nfs cluster create $cluster_id
        if [ $CEPH_NUM_MDS -gt 0 ]; then
            prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
        if [ "$CEPH_NUM_RGW" -gt 0 ]; then
            pseudo_path="/rgw"
            do_rgw_create_bucket
            prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
    else
        start_ganesha
        echo "Mount using: mount -t nfs -o port=<ganesha-port-num> $IP:$pseudo_path mountpoint"
    fi
fi

docker_service(){
    local service=''
    # prefer podman
    if command -v podman > /dev/null; then
        service="podman"
    elif pgrep -f docker > /dev/null; then
        service="docker"
    fi
    if [ -n "$service" ]; then
        echo "using $service for deploying jaeger..."
        # check for exited containers, remove them and restart the container
        if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then
            $service rm jaeger
        fi
        if [ ! "$(podman ps -aq -f name=jaeger)" ]; then
            $service "$@"
        fi
    else
        echo "cannot find docker or podman, please restart service and rerun."
    fi
}

echo ""
if [ $with_jaeger -eq 1 ]; then
    debug echo "Enabling jaegertracing..."
    docker_service run -d --name jaeger \
        -p 5775:5775/udp \
        -p 6831:6831/udp \
        -p 6832:6832/udp \
        -p 5778:5778 \
        -p 16686:16686 \
        -p 14268:14268 \
        -p 14250:14250 \
        quay.io/jaegertracing/all-in-one
fi

debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."

echo ""
if [ "$new" -eq 1 ]; then
    if $with_mgr_dashboard; then
        cat <<EOF
dashboard urls: $DASH_URLS
  w/ user/pass: admin / admin
EOF
    fi
    if $with_mgr_restful; then
        cat <<EOF
restful urls: $RESTFUL_URLS
  w/ user/pass: admin / $RESTFUL_SECRET
EOF
    fi
fi

echo ""
# add header to the environment file
{
    echo "#"
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
    echo "#"
} > $CEPH_DIR/vstart_environment.sh
{
    echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
    echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
    echo "export PATH=$CEPH_DIR/bin:\$PATH"

    if [ "$CEPH_DIR" != "$PWD" ]; then
        echo "export CEPH_CONF=$conf_fn"
        echo "export CEPH_KEYRING=$keyring_fn"
    fi

    if [ -n "$CEPHFS_SHELL" ]; then
        echo "alias cephfs-shell=$CEPHFS_SHELL"
    fi
} | tee -a $CEPH_DIR/vstart_environment.sh

echo "CEPH_DEV=1"

# always keep this section at the very bottom of this file
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
    echo ""
    echo ""
    echo "WARNING:"
    echo "    Please remove stray $STRAY_CONF_PATH if not needed."
    echo "    Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "    and may lead to undesired results."
    echo ""
    echo "NOTE:"
    echo "    Remember to restart cluster after removing $STRAY_CONF_PATH"
fi

init_logrotate