-#!/bin/bash -x
+#!/usr/bin/env bash
+# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
+# vim: ts=8 sw=8 ft=bash smarttab
+set -x
-source $(dirname $0)/../ceph-helpers.sh
+source $(dirname $0)/../../standalone/ceph-helpers.sh
set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
-
-function get_admin_socket()
-{
- local client=$1
-
- if test -n "$CEPH_OUT_DIR";
- then
- echo $CEPH_OUT_DIR/$client.asok
- else
- local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
- echo "/var/run/ceph/$cluster-$client.asok"
- fi
-}
+export CEPH_DEV=1
function check_no_osd_down()
{
if "$@"; then return 1; else return 0; fi
}
+function expect_true()
+{
+ set -x
+ if ! "$@"; then return 1; else return 0; fi
+}
TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0
if [ -n "$1" ]; then
whatch_opt=--watch-$1
+ if [ -n "$2" ]; then
+ whatch_opt+=" --watch-channel $2"
+ fi
fi
CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
function test_mon_injectargs()
{
- CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'false'"
- ! grep "the.dump" $TMPFILE || return 1
- ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
- ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'false'"
- ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'true'"
- ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
+ ceph tell osd.0 injectargs --no-osd_enable_op_tracker
+ ceph tell osd.0 config get osd_enable_op_tracker | grep false
+ ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
+ ceph tell osd.0 config get osd_enable_op_tracker | grep true
+ ceph tell osd.0 config get osd_op_history_duration | grep 500
+ ceph tell osd.0 injectargs --no-osd_enable_op_tracker
+ ceph tell osd.0 config get osd_enable_op_tracker | grep false
+ ceph tell osd.0 injectargs -- --osd_enable_op_tracker
+ ceph tell osd.0 config get osd_enable_op_tracker | grep true
+ ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
+ ceph tell osd.0 config get osd_enable_op_tracker | grep true
+ ceph tell osd.0 config get osd_op_history_duration | grep 600
+
+ ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
+ ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
+
+ ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
+ ceph tell osd.0 config get mon_probe_timeout | grep 2
+
+ ceph tell osd.0 injectargs -- '--mon-lease 6'
+ ceph tell osd.0 config get mon_lease | grep 6
+
+ # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
+ expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
+ check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
+
expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
ceph tell osd.0 injectargs -- '--osd_op_history_duration'
- ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
- check_response "mon_lease = '6' (not observed, change may require restart)"
-
- # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
- expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1
}
function test_mon_injectargs_SI()
# We only aim at testing the units are parsed accordingly
# and don't intend to test whether the options being set
# actually expect SI units to be passed.
- # Keep in mind that all integer based options (i.e., INT,
- # LONG, U32, U64) will accept SI unit modifiers.
+ # Keep in mind that all integer based options that are not based on bytes
+ # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
+ # base 10.
initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
- check_response "'10F': (22) Invalid argument"
+ check_response "(22) Invalid argument"
# now test with injectargs
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
+function test_mon_injectargs_IEC()
+{
+ # Test IEC units during injectargs and 'config set'
+ # We only aim at testing the units are parsed accordingly
+ # and don't intend to test whether the options being set
+ # actually expect IEC units to be passed.
+ # Keep in mind that all integer based options that are based on bytes
+ # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
+ # unit modifiers (for backwards compatibility and convenience) and be parsed
+ # to base 2.
+ initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
+ expect_config_value "mon.a" "mon_data_size_warn" 15000000000
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
+ expect_config_value "mon.a" "mon_data_size_warn" 16106127360
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
+ expect_config_value "mon.a" "mon_data_size_warn" 17179869184
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
+ check_response "(22) Invalid argument"
+ # now test with injectargs
+ ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
+ expect_config_value "mon.a" "mon_data_size_warn" 15000000000
+ ceph tell mon.a injectargs '--mon_data_size_warn 15G'
+ expect_config_value "mon.a" "mon_data_size_warn" 16106127360
+ ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
+ expect_config_value "mon.a" "mon_data_size_warn" 17179869184
+ expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
+ $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
+}
+
function test_tiering_agent()
{
local slow=slow_eviction
local fast=fast_eviction
ceph osd pool create $slow 1 1
+ ceph osd pool application enable $slow rados
ceph osd pool create $fast 1 1
ceph osd tier add $slow $fast
ceph osd tier cache-mode $fast writeback
ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
-function test_tiering()
+function test_tiering_1()
{
# tiering
ceph osd pool create slow 2
+ ceph osd pool application enable slow rados
ceph osd pool create slow2 2
+ ceph osd pool application enable slow2 rados
ceph osd pool create cache 2
ceph osd pool create cache2 2
ceph osd tier add slow cache
ceph osd tier add slow cache2
expect_false ceph osd tier add slow2 cache
+ # application metadata should propagate to the tiers
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
+ # forward and proxy are removed/deprecated
+ expect_false ceph osd tier cache-mode cache forward
+ expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
+ expect_false ceph osd tier cache-mode cache proxy
+ expect_false ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
# test some state transitions
ceph osd tier cache-mode cache writeback
- expect_false ceph osd tier cache-mode cache forward
- ceph osd tier cache-mode cache forward --yes-i-really-mean-it
expect_false ceph osd tier cache-mode cache readonly
+ expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
+ ceph osd tier cache-mode cache readproxy
+ ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
- expect_false ceph osd tier cache-mode cache forward
- ceph osd tier cache-mode cache forward --yes-i-really-mean-it
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache writeback
- ceph osd tier cache-mode cache proxy
- ceph osd tier cache-mode cache writeback
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
# test with dirty objects in the tier pool
# tier pool currently set to 'writeback'
rados -p cache put /etc/passwd /etc/passwd
- ceph tell osd.\* flush_pg_stats || true
+ flush_pg_stats
# 1 dirty object in pool 'cache'
- ceph osd tier cache-mode cache proxy
+ ceph osd tier cache-mode cache readproxy
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache writeback
# remove object from tier pool
rados -p cache rm /etc/passwd
rados -p cache cache-flush-evict-all
- ceph tell osd.\* flush_pg_stats || true
+ flush_pg_stats
# no dirty objects in pool 'cache'
- ceph osd tier cache-mode cache proxy
+ ceph osd tier cache-mode cache readproxy
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
TRIES=0
ceph osd pool ls detail | grep cache2
ceph osd pool ls detail -f json-pretty | grep cache2
+ ceph osd pool delete slow slow --yes-i-really-really-mean-it
+ ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
ceph osd pool delete cache cache --yes-i-really-really-mean-it
ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
+}
+function test_tiering_2()
+{
# make sure we can't clobber snapshot state
ceph osd pool create snap_base 2
+ ceph osd pool application enable snap_base rados
ceph osd pool create snap_cache 2
ceph osd pool mksnap snap_cache snapname
expect_false ceph osd tier add snap_base snap_cache
ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
+}
+function test_tiering_3()
+{
# make sure we can't create snapshot on tier
ceph osd pool create basex 2
+ ceph osd pool application enable basex rados
ceph osd pool create cachex 2
ceph osd tier add basex cachex
expect_false ceph osd pool mksnap cache snapname
ceph osd tier remove basex cachex
ceph osd pool delete basex basex --yes-i-really-really-mean-it
ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
+}
+function test_tiering_4()
+{
# make sure we can't create an ec pool tier
ceph osd pool create eccache 2 2 erasure
expect_false ceph osd set-require-min-compat-client bobtail
ceph osd pool create repbase 2
+ ceph osd pool application enable repbase rados
expect_false ceph osd tier add repbase eccache
ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
+}
+function test_tiering_5()
+{
# convenient add-cache command
+ ceph osd pool create slow 2
+ ceph osd pool application enable slow rados
ceph osd pool create cache3 2
ceph osd tier add-cache slow cache3 1024000
ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
ceph osd pool ls | grep cache3
ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
! ceph osd pool ls | grep cache3 || exit 1
-
- ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
ceph osd pool delete slow slow --yes-i-really-really-mean-it
+}
+function test_tiering_6()
+{
# check add-cache whether work
ceph osd pool create datapool 2
+ ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd tier cache-mode cachepool writeback
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
+}
+function test_tiering_7()
+{
# protection against pool removal when used as tiers
ceph osd pool create datapool 2
+ ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
+}
+function test_tiering_8()
+{
## check health check
ceph osd set notieragent
ceph osd pool create datapool 2
+ ceph osd pool application enable datapool rados
ceph osd pool create cache4 2
ceph osd tier add-cache datapool cache4 1024000
ceph osd tier cache-mode cache4 writeback
rados -p cache4 put foo1 $tmpfile
rados -p cache4 put foo2 $tmpfile
rm -f $tmpfile
- ceph tell osd.\* flush_pg_stats || true
+ flush_pg_stats
ceph df | grep datapool | grep ' 2 '
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cache4
ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
ceph osd unset notieragent
+}
-
+function test_tiering_9()
+{
# make sure 'tier remove' behaves as we expect
# i.e., removing a tier from a pool that's not its base pool only
# results in a 'pool foo is now (or already was) not a tier of bar'
#
ceph osd pool create basepoolA 2
+ ceph osd pool application enable basepoolA rados
ceph osd pool create basepoolB 2
+ ceph osd pool application enable basepoolB rados
poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
function test_auth()
{
- ceph auth add client.xx mon allow osd "allow *"
+ expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
+ expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
+ ceph auth add client.xx mon 'allow *' osd "allow *"
ceph auth export client.xx >client.xx.keyring
ceph auth add client.xx -i client.xx.keyring
rm -f client.xx.keyring
ceph auth list | grep client.xx
+ ceph auth ls | grep client.xx
ceph auth get client.xx | grep caps | grep mon
ceph auth get client.xx | grep caps | grep osd
ceph auth get-key client.xx
ceph auth caps client.xx osd "allow rw"
expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
ceph auth get client.xx | grep osd | grep "allow rw"
+ ceph auth caps client.xx mon 'allow command "osd tree"'
ceph auth export | grep client.xx
ceph auth export -o authfile
- ceph auth import -i authfile
+ ceph auth import -i authfile 2>$TMPFILE
+ check_response "imported keyring"
+
ceph auth export -o authfile2
diff authfile authfile2
rm authfile authfile2
expect_false ceph auth get client.xx
# (almost) interactive mode
- echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
+ echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
ceph auth get client.xx
# script mode
echo 'auth del client.xx' | ceph
expect_false ceph auth get client.xx
-
- #
- # get / set auid
- #
- local auid=444
- ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
- expect_false ceph auth import --in-file TEST-keyring
- rm TEST-keyring
- ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
- ceph auth import --in-file TEST-keyring
- rm TEST-keyring
- ceph auth get client.TEST > $TMPFILE
- check_response "auid = $auid"
- ceph --format json-pretty auth get client.TEST > $TMPFILE
- check_response '"auid": '$auid
- ceph auth list > $TMPFILE
- check_response "auid: $auid"
- ceph --format json-pretty auth list > $TMPFILE
- check_response '"auid": '$auid
- ceph auth del client.TEST
}
function test_auth_profiles()
ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
- ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
# read-only gets access denied for rw commands or auth commands
ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
check_response "EACCES: access denied"
- ceph -n client.xx-profile-ro -k client.xx.keyring auth list >& $TMPFILE || true
+ ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
check_response "EACCES: access denied"
# read-write is allowed for all read-write commands (except auth)
ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
- ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
+ ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
ceph -n client.xx-profile-rw -k client.xx.keyring log foo
ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
# read-write gets access denied for auth commands
- ceph -n client.xx-profile-rw -k client.xx.keyring auth list >& $TMPFILE || true
+ ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
check_response "EACCES: access denied"
# role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
- ceph -n client.xx-profile-rd -k client.xx.keyring auth list
+ ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
ceph -n client.xx-profile-rd -k client.xx.keyring auth export
ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
ceph -n client.xx-profile-rd -k client.xx.keyring status
# but read-write 'mon' commands are not
ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
check_response "EACCES: access denied"
- ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
+ ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
- rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
+ # pass --no-mon-config since we are looking for the permission denied error
+ rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
+ cat $TMPFILE
check_response "Permission denied"
rm -rf $TEMP_DIR/ceph.client.bug.keyring
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
- rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
+ rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
check_response "Permission denied"
}
ceph osd dump | grep '^epoch'
ceph --concise osd dump | grep '^epoch'
+ ceph osd df | grep 'MIN/MAX VAR'
+
# df
ceph df > $TMPFILE
- grep GLOBAL $TMPFILE
+ grep RAW $TMPFILE
grep -v DIRTY $TMPFILE
ceph df detail > $TMPFILE
grep DIRTY $TMPFILE
ceph health --format json-pretty
ceph health detail --format xml-pretty
+ ceph time-sync-status
+
ceph node ls
- for t in mon osd mds ; do
+ for t in mon osd mds mgr ; do
ceph node ls $t
done
ceph_watch_start
mymsg="this is a test log message $$.$(date)"
ceph log "$mymsg"
+ ceph log last | grep "$mymsg"
+ ceph log last 100 | grep "$mymsg"
ceph_watch_wait "$mymsg"
+ ceph mgr stat
+ ceph mgr dump
+ ceph mgr module ls
+ ceph mgr module enable restful
+ expect_false ceph mgr module enable foodne
+ ceph mgr module enable foodne --force
+ ceph mgr module disable foodne
+ ceph mgr module disable foodnebizbangbash
+
ceph mon metadata a
ceph mon metadata
+ ceph mon count-metadata ceph_version
+ ceph mon versions
+
+ ceph mgr metadata
+ ceph mgr versions
+ ceph mgr count-metadata ceph_version
+
+ ceph versions
+
ceph node ls
}
function get_mds_gids()
{
fs_name=$1
- ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
+ ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
}
function fail_all_mds()
function remove_all_fs()
{
- existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
+ existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
for fs_name in $existing_fs ; do
echo "Removing fs ${fs_name}..."
fail_all_mds $fs_name
# in the cluster at all
function mds_exists()
{
- ceph auth list | grep "^mds"
+ ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
function test_mds_tell()
{
- FS_NAME=cephfs
+ local FS_NAME=cephfs
if ! mds_exists ; then
echo "Skipping test, no MDS found"
return
fi
remove_all_fs
- ceph osd pool create fs_data 10
- ceph osd pool create fs_metadata 10
+ ceph osd pool create fs_data 16
+ ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
wait_mds_active $FS_NAME
function test_mon_mds()
{
- FS_NAME=cephfs
+ local FS_NAME=cephfs
remove_all_fs
- ceph osd pool create fs_data 10
- ceph osd pool create fs_metadata 10
+ ceph osd pool create fs_data 16
+ ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
ceph fs set $FS_NAME cluster_down true
ceph fs set $FS_NAME cluster_down false
- # Legacy commands, act on default fs
- ceph mds cluster_down
- ceph mds cluster_up
-
ceph mds compat rm_incompat 4
ceph mds compat rm_incompat 4
fail_all_mds $FS_NAME
ceph mds compat show
- expect_false ceph mds deactivate 2
- ceph mds dump
ceph fs dump
ceph fs get $FS_NAME
for mds_gid in $(get_mds_gids $FS_NAME) ; do
ceph mds metadata $mds_id
done
ceph mds metadata
+ ceph mds versions
+ ceph mds count-metadata os
# XXX mds fail, but how do you undo it?
mdsmapfile=$TEMP_DIR/mdsmap.$$
- current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
+ current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
[ -s $mdsmapfile ]
rm $mdsmapfile
- ceph osd pool create data2 10
- ceph osd pool create data3 10
+ ceph osd pool create data2 16
+ ceph osd pool create data3 16
data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
- ceph mds add_data_pool $data2_pool
- ceph mds add_data_pool $data3_pool
- ceph mds add_data_pool 100 >& $TMPFILE || true
+ ceph fs add_data_pool cephfs $data2_pool
+ ceph fs add_data_pool cephfs $data3_pool
+ ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
check_response "Error ENOENT"
- ceph mds add_data_pool foobarbaz >& $TMPFILE || true
+ ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
check_response "Error ENOENT"
- ceph mds remove_data_pool $data2_pool
- ceph mds remove_data_pool $data3_pool
+ ceph fs rm_data_pool cephfs $data2_pool
+ ceph fs rm_data_pool cephfs $data3_pool
ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
- expect_false ceph mds set_max_mds 4
- ceph mds set allow_multimds true --yes-i-really-mean-it
- ceph mds set_max_mds 4
- ceph mds set_max_mds 3
- ceph mds set_max_mds 256
- expect_false ceph mds set_max_mds 257
- ceph mds set max_mds 4
- ceph mds set max_mds 256
- expect_false ceph mds set max_mds 257
- expect_false ceph mds set max_mds asdf
- expect_false ceph mds set inline_data true
- ceph mds set inline_data true --yes-i-really-mean-it
- ceph mds set inline_data yes --yes-i-really-mean-it
- ceph mds set inline_data 1 --yes-i-really-mean-it
- expect_false ceph mds set inline_data --yes-i-really-mean-it
- ceph mds set inline_data false
- ceph mds set inline_data no
- ceph mds set inline_data 0
- expect_false ceph mds set inline_data asdf
- ceph mds set max_file_size 1048576
- expect_false ceph mds set max_file_size 123asdf
-
- expect_false ceph mds set allow_new_snaps
- expect_false ceph mds set allow_new_snaps true
- ceph mds set allow_new_snaps true --yes-i-really-mean-it
- ceph mds set allow_new_snaps 0
- ceph mds set allow_new_snaps false
- ceph mds set allow_new_snaps no
- expect_false ceph mds set allow_new_snaps taco
+ ceph fs set cephfs max_mds 4
+ ceph fs set cephfs max_mds 3
+ ceph fs set cephfs max_mds 256
+ expect_false ceph fs set cephfs max_mds 257
+ ceph fs set cephfs max_mds 4
+ ceph fs set cephfs max_mds 256
+ expect_false ceph fs set cephfs max_mds 257
+ expect_false ceph fs set cephfs max_mds asdf
+ expect_false ceph fs set cephfs inline_data true
+ ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
+ ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
+ ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
+ expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
+ ceph fs set cephfs inline_data false
+ ceph fs set cephfs inline_data no
+ ceph fs set cephfs inline_data 0
+ expect_false ceph fs set cephfs inline_data asdf
+ ceph fs set cephfs max_file_size 1048576
+ expect_false ceph fs set cephfs max_file_size 123asdf
+
+ expect_false ceph fs set cephfs allow_new_snaps
+ ceph fs set cephfs allow_new_snaps true
+ ceph fs set cephfs allow_new_snaps 0
+ ceph fs set cephfs allow_new_snaps false
+ ceph fs set cephfs allow_new_snaps no
+ expect_false ceph fs set cephfs allow_new_snaps taco
# we should never be able to add EC pools as data or metadata pools
# create an ec-pool...
- ceph osd pool create mds-ec-pool 10 10 erasure
+ ceph osd pool create mds-ec-pool 16 16 erasure
set +e
- ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
+ ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
ceph mds rmfailed 0 --yes-i-really-mean-it
set -e
- # Check that `newfs` is no longer permitted
- expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
+ # Check that `fs new` is no longer permitted
+ expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
# Check that 'fs reset' runs
ceph fs reset $FS_NAME --yes-i-really-mean-it
# Check that creating a second FS fails by default
- ceph osd pool create fs_metadata2 10
- ceph osd pool create fs_data2 10
+ ceph osd pool create fs_metadata2 16
+ ceph osd pool create fs_data2 16
set +e
expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
set -e
# Removing tier should be permitted because the underlying pool is
# replicated (#11504 case)
- ceph osd tier cache-mode mds-tier proxy
+ ceph osd tier cache-mode mds-tier readproxy
ceph osd tier remove-overlay fs_metadata
ceph osd tier remove fs_metadata mds-tier
ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
# ceph mds rm
# ceph mds rmfailed
# ceph mds set_state
- # ceph mds stop
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
test "$nmons" -gt 0
- ceph mds dump |
+ ceph fs dump |
sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
while read gid id rank; do
ceph mds metadata ${gid} | grep '"hostname":'
{
# print help message
ceph --help mon
+ # -h works even when some arguments are passed
+ ceph osd dump -h | grep 'osd dump'
+ ceph osd dump 123 -h | grep 'osd dump'
# no mon add/remove
ceph mon dump
ceph mon getmap -o $TEMP_DIR/monmap.$$
[ -s $TEMP_DIR/monmap.$$ ]
+
# ceph mon tell
- ceph mon_status
+ first=$(ceph mon dump -f json | jq -r '.mons[0].name')
+ ceph tell mon.$first mon_status
# test mon features
- ceph mon feature list
+ ceph mon feature ls
ceph mon feature set kraken --yes-i-really-mean-it
expect_false ceph mon feature set abcd
expect_false ceph mon feature set abcd --yes-i-really-mean-it
+
+ # test elector
+ expect_failure $TEMP_DIR ceph mon add disallowed_leader $first
+ ceph mon set election_strategy disallow
+ ceph mon add disallowed_leader $first
+ ceph mon set election_strategy connectivity
+ ceph mon rm disallowed_leader $first
+ ceph mon set election_strategy classic
+ expect_failure $TEMP_DIR ceph mon rm disallowed_leader $first
+
+ # test mon stat
+ # don't check output, just ensure it does not fail.
+ ceph mon stat
+ ceph mon stat -f json | jq '.'
+}
+
+function test_mon_priority_and_weight()
+{
+ for i in 0 1 65535; do
+ ceph mon set-weight a $i
+ w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
+ [[ "$w" == "$i" ]]
+ done
+
+ for i in -1 65536; do
+ expect_false ceph mon set-weight a $i
+ done
+}
+
+function gen_secrets_file()
+{
+ # let's assume we can have the following types
+ # all - generates both cephx and lockbox, with mock dm-crypt key
+ # cephx - only cephx
+ # no_cephx - lockbox and dm-crypt, no cephx
+ # no_lockbox - dm-crypt and cephx, no lockbox
+ # empty - empty file
+ # empty_json - correct json, empty map
+ # bad_json - bad json :)
+ #
+ local t=$1
+ if [[ -z "$t" ]]; then
+ t="all"
+ fi
+
+ fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
+ echo $fn
+ if [[ "$t" == "empty" ]]; then
+ return 0
+ fi
+
+ echo "{" > $fn
+ if [[ "$t" == "bad_json" ]]; then
+ echo "asd: ; }" >> $fn
+ return 0
+ elif [[ "$t" == "empty_json" ]]; then
+ echo "}" >> $fn
+ return 0
+ fi
+
+ cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
+ lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
+ dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
+
+ if [[ "$t" == "all" ]]; then
+ echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
+ elif [[ "$t" == "cephx" ]]; then
+ echo "$cephx_secret" >> $fn
+ elif [[ "$t" == "no_cephx" ]]; then
+ echo "$lb_secret,$dmcrypt_key" >> $fn
+ elif [[ "$t" == "no_lockbox" ]]; then
+ echo "$cephx_secret,$dmcrypt_key" >> $fn
+ else
+ echo "unknown gen_secrets_file() type \'$t\'"
+ return 1
+ fi
+ echo "}" >> $fn
+ return 0
+}
+
+# Exercise the "ceph osd new" / destroy / purge lifecycle, including the
+# secrets-file variants produced by gen_secrets_file().
+function test_mon_osd_create_destroy()
+{
+  # "osd new" without a uuid, or with an empty uuid, must be rejected.
+  ceph osd new 2>&1 | grep 'EINVAL'
+  ceph osd new '' -1 2>&1 | grep 'EINVAL'
+  ceph osd new '' 10 2>&1 | grep 'EINVAL'
+
+  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
+
+  old_osds=$(ceph osd ls)
+  num_osds=$(ceph osd ls | wc -l)
+
+  uuid=$(uuidgen)
+  id=$(ceph osd new $uuid 2>/dev/null)
+
+  # The new id must not collide with any pre-existing osd id.
+  for i in $old_osds; do
+    [[ "$i" != "$id" ]]
+  done
+
+  ceph osd find $id
+
+  # Re-running "osd new" with the same uuid must be idempotent.
+  id2=`ceph osd new $uuid 2>/dev/null`
+
+  [[ $id2 == $id ]]
+
+  ceph osd new $uuid $id
+
+  # Same uuid with a *different* id must fail.
+  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
+  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
+
+  uuid2=$(uuidgen)
+  id2=$(ceph osd new $uuid2)
+  ceph osd find $id2
+  [[ "$id2" != "$id" ]]
+
+  ceph osd new $uuid $id2 2>&1 | grep EEXIST
+  ceph osd new $uuid2 $id2
+
+  # test with secrets
+  empty_secrets=$(gen_secrets_file "empty")
+  empty_json=$(gen_secrets_file "empty_json")
+  all_secrets=$(gen_secrets_file "all")
+  cephx_only=$(gen_secrets_file "cephx")
+  no_cephx=$(gen_secrets_file "no_cephx")
+  no_lockbox=$(gen_secrets_file "no_lockbox")
+  bad_json=$(gen_secrets_file "bad_json")
+
+  # empty secrets should be idempotent
+  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
+  [[ "$new_id" == "$id" ]]
+
+  # empty json, thus empty secrets
+  new_id=$(ceph osd new $uuid $id -i $empty_json)
+  [[ "$new_id" == "$id" ]]
+
+  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
+
+  ceph osd rm $id
+  ceph osd rm $id2
+  ceph osd setmaxosd $old_maxosd
+
+  # A secrets file missing cephx (or missing lockbox) must be rejected.
+  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
+  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
+
+  osds=$(ceph osd ls)
+  id=$(ceph osd new $uuid -i $all_secrets)
+  for i in $osds; do
+    [[ "$i" != "$id" ]]
+  done
+
+  ceph osd find $id
+
+  # validate secrets and dm-crypt are set
+  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
+  s=$(cat $all_secrets | jq '.cephx_secret')
+  [[ $k == $s ]]
+  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
+      jq '.key')
+  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
+  [[ $k == $s ]]
+  ceph config-key exists dm-crypt/osd/$uuid/luks
+
+  osds=$(ceph osd ls)
+  id2=$(ceph osd new $uuid2 -i $cephx_only)
+  for i in $osds; do
+    [[ "$i" != "$id2" ]]
+  done
+
+  ceph osd find $id2
+  # NOTE(review): this re-validates osd.$id against $all_secrets even though
+  # the osd just created is $id2 from $cephx_only — looks like a copy-paste;
+  # confirm whether osd.$id2 / $cephx_only was intended.
+  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
+  s=$(cat $all_secrets | jq '.cephx_secret')
+  [[ $k == $s ]]
+  # cephx-only flavor must not create lockbox or dm-crypt material.
+  expect_false ceph auth get-key client.osd-lockbox.$uuid2
+  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
+
+  # destroy keeps the osd id in the map (marked destroyed) but drops auth.
+  ceph osd destroy osd.$id2 --yes-i-really-mean-it
+  ceph osd destroy $id2 --yes-i-really-mean-it
+  ceph osd find $id2
+  expect_false ceph auth get-key osd.$id2
+  ceph osd dump | grep osd.$id2 | grep destroyed
+
+  # Re-using a destroyed id with a fresh uuid clears the destroyed state.
+  id3=$id2
+  uuid3=$(uuidgen)
+  ceph osd new $uuid3 $id3 -i $all_secrets
+  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
+  ceph auth get-key client.osd-lockbox.$uuid3
+  ceph auth get-key osd.$id3
+  ceph config-key exists dm-crypt/osd/$uuid3/luks
+
+  # purge-new / purge remove the id, its auth and its config-keys entirely.
+  ceph osd purge-new osd.$id3 --yes-i-really-mean-it
+  expect_false ceph osd find $id2
+  expect_false ceph auth get-key osd.$id2
+  expect_false ceph auth get-key client.osd-lockbox.$uuid3
+  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
+  ceph osd purge osd.$id3 --yes-i-really-mean-it
+  ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
+
+  ceph osd purge osd.$id --yes-i-really-mean-it
+  ceph osd purge 123456 --yes-i-really-mean-it
+  expect_false ceph osd find $id
+  expect_false ceph auth get-key osd.$id
+  expect_false ceph auth get-key client.osd-lockbox.$uuid
+  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
+
+  rm $empty_secrets $empty_json $all_secrets $cephx_only \
+      $no_cephx $no_lockbox $bad_json
+
+  # The osd map must be back to its original population.
+  for i in $(ceph osd ls); do
+    [[ "$i" != "$id" ]]
+    [[ "$i" != "$id2" ]]
+    [[ "$i" != "$id3" ]]
+  done
+
+  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
+  ceph osd setmaxosd $old_maxosd
+
+}
+
+# Store, fetch, list, dump and remove an arbitrary key through the mon
+# config-key service; a removed key must be absent again afterwards.
+function test_mon_config_key()
+{
+  key=asdfasdfqwerqwreasdfuniquesa123df
+  # grep -c prints a count; piping into "grep 0"/"grep 1" asserts it.
+  ceph config-key list | grep -c $key | grep 0
+  ceph config-key get $key | grep -c bar | grep 0
+  ceph config-key set $key bar
+  ceph config-key get $key | grep bar
+  ceph config-key list | grep -c $key | grep 1
+  ceph config-key dump | grep $key | grep bar
+  ceph config-key rm $key
+  expect_false ceph config-key get $key
+  ceph config-key list | grep -c $key | grep 0
+  ceph config-key dump | grep -c $key | grep 0
 }
 function test_mon_osd()
 {
+  # Exercise the mon-side "ceph osd ..." command surface: blocklisting,
+  # cluster flag (un)setting, per-osd / crush-node / device-class flag
+  # groups, osd info/tree/stat output and full-map round-trips.
   #
-  # osd blacklist
+  # osd blocklist
   #
   bl=192.168.0.1:0/1000
-  ceph osd blacklist add $bl
-  ceph osd blacklist ls | grep $bl
-  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
+  ceph osd blocklist add $bl
+  ceph osd blocklist ls | grep $bl
+  ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
   ceph osd dump --format=json-pretty | grep $bl
-  ceph osd dump | grep "^blacklist $bl"
-  ceph osd blacklist rm $bl
-  ceph osd blacklist ls | expect_false grep $bl
+  ceph osd dump | grep $bl
+  ceph osd blocklist rm $bl
+  ceph osd blocklist ls | expect_false grep $bl
   bl=192.168.0.1
   # test without nonce, invalid nonce
-  ceph osd blacklist add $bl
-  ceph osd blacklist ls | grep $bl
-  ceph osd blacklist rm $bl
-  ceph osd blacklist ls | expect_false grep $expect_false bl
-  expect_false "ceph osd blacklist $bl/-1"
-  expect_false "ceph osd blacklist $bl/foo"
-
-  # test with wrong address
-  expect_false "ceph osd blacklist 1234.56.78.90/100"
+  ceph osd blocklist add $bl
+  ceph osd blocklist ls | grep $bl
+  ceph osd blocklist rm $bl
+  ceph osd blocklist ls | expect_false grep $bl
+  expect_false "ceph osd blocklist add $bl/-1"
+  expect_false "ceph osd blocklist add $bl/foo"
+
+  # test with invalid address
+  expect_false "ceph osd blocklist add 1234.56.78.90/100"
+
+  # test range blocklisting
+  bl=192.168.0.1:0/24
+  ceph osd blocklist range add $bl
+  ceph osd blocklist ls | grep $bl
+  ceph osd blocklist range rm $bl
+  ceph osd blocklist ls | expect_false grep $bl
+  bad_bl=192.168.0.1/33
+  expect_false ceph osd blocklist range add $bad_bl
   # Test `clear`
-  ceph osd blacklist add $bl
-  ceph osd blacklist ls | grep $bl
-  ceph osd blacklist clear
-  ceph osd blacklist ls | expect_false grep $bl
+  ceph osd blocklist add $bl
+  ceph osd blocklist ls | grep $bl
+  ceph osd blocklist clear
+  ceph osd blocklist ls | expect_false grep $bl
+
+  # deprecated syntax?
+  ceph osd blacklist ls
   #
   # osd crush
   #
   # require-min-compat-client
   expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
-  ceph osd set-require-min-compat-client luminous
+  ceph osd get-require-min-compat-client | grep luminous
   ceph osd dump | grep 'require_min_compat_client luminous'
   #
   # osd scrub
   #
+
+  # blocking
+  ceph osd scrub 0 --block
+  ceph osd deep-scrub 0 --block
+
   # how do I tell when these are done?
   ceph osd scrub 0
   ceph osd deep-scrub 0
   ceph osd repair 0
-  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
+  # pool scrub, force-recovery/backfill
+  pool_names=`rados lspools`
+  for pool_name in $pool_names
+  do
+    ceph osd pool scrub $pool_name
+    ceph osd pool deep-scrub $pool_name
+    ceph osd pool repair $pool_name
+    ceph osd pool force-recovery $pool_name
+    ceph osd pool cancel-force-recovery $pool_name
+    ceph osd pool force-backfill $pool_name
+    ceph osd pool cancel-force-backfill $pool_name
+  done
+
+  # Every togglable cluster flag must set and unset cleanly.
+  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
+      norebalance norecover notieragent
   do
     ceph osd set $f
     ceph osd unset $f
   done
-  expect_false ceph osd unset sortbitwise # cannot be unset
   expect_false ceph osd set bogus
   expect_false ceph osd unset bogus
-  ceph osd set require_jewel_osds
-  expect_false ceph osd unset require_jewel_osds
-  ceph osd set require_kraken_osds
-  expect_false ceph osd unset require_kraken_osds
+  # Legacy one-way flags can no longer be toggled at all.
+  for f in sortbitwise recover_deletes require_jewel_osds \
+      require_kraken_osds
+  do
+    expect_false ceph osd set $f
+    expect_false ceph osd unset $f
+  done
+  ceph osd require-osd-release quincy
+  # can't lower
+  expect_false ceph osd require-osd-release pacific
+  expect_false ceph osd require-osd-release octopus
+  # these are no-ops but should succeed.
   ceph osd set noup
   ceph osd down 0
   ceph osd dump | grep 'osd.0.*in'
   ceph osd find 0
+  # "osd info" accepts both bare ids and osd.N names; bad ids must fail.
+  ceph osd info 0
+  ceph osd info osd.0
+  expect_false ceph osd info osd.xyz
+  expect_false ceph osd info xyz
+  expect_false ceph osd info 42
+  expect_false ceph osd info osd.42
+
+  ceph osd info
+  # "osd info" output must agree with the osds section of "osd dump";
+  # retry once after a settle delay to ride out in-flight state changes.
+  info_json=$(ceph osd info --format=json | jq -cM '.')
+  dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
+  if [[ "${info_json}" != "${dump_json}" ]]; then
+    echo "waiting for OSDs to settle"
+    sleep 10
+    info_json=$(ceph osd info --format=json | jq -cM '.')
+    dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
+    [[ "${info_json}" == "${dump_json}" ]]
+  fi
+
+  info_json=$(ceph osd info 0 --format=json | jq -cM '.')
+  dump_json=$(ceph osd dump --format=json | \
+      jq -cM '.osds[] | select(.osd == 0)')
+  [[ "${info_json}" == "${dump_json}" ]]
+
+  info_plain="$(ceph osd info)"
+  dump_plain="$(ceph osd dump | grep '^osd')"
+  [[ "${info_plain}" == "${dump_plain}" ]]
+
+  info_plain="$(ceph osd info 0)"
+  dump_plain="$(ceph osd dump | grep '^osd.0')"
+  [[ "${info_plain}" == "${dump_plain}" ]]
+
+  # Per-osd flag add/rm must be reflected in health detail.
+  ceph osd add-nodown 0 1
+  ceph health detail | grep 'NODOWN'
+  ceph osd rm-nodown 0 1
+  ! ceph health detail | grep 'NODOWN'
+
+  ceph osd out 0 # so we can mark it as noin later
+  ceph osd add-noin 0
+  ceph health detail | grep 'NOIN'
+  ceph osd rm-noin 0
+  ! ceph health detail | grep 'NOIN'
+  ceph osd in 0
+
+  ceph osd add-noout 0
+  ceph health detail | grep 'NOOUT'
+  ceph osd rm-noout 0
+  ! ceph health detail | grep 'NOOUT'
+
+  # test osd id parse
+  expect_false ceph osd add-noup 797er
+  expect_false ceph osd add-nodown u9uwer
+  expect_false ceph osd add-noin 78~15
+
+  expect_false ceph osd rm-noup 1234567
+  expect_false ceph osd rm-nodown fsadf7
+  expect_false ceph osd rm-noout 790-fd
+
+  # "any"/"all" must clear the flag on every osd at once.
+  ids=`ceph osd ls-tree default`
+  for osd in $ids
+  do
+    ceph osd add-nodown $osd
+    ceph osd add-noout $osd
+  done
+  ceph -s | grep 'NODOWN'
+  ceph -s | grep 'NOOUT'
+  ceph osd rm-nodown any
+  ceph osd rm-noout all
+  ! ceph -s | grep 'NODOWN'
+  ! ceph -s | grep 'NOOUT'
+
+  # test crush node flags
+  ceph osd add-noup osd.0
+  ceph osd add-nodown osd.0
+  ceph osd add-noin osd.0
+  ceph osd add-noout osd.0
+  # per-osd flags must not appear under crush_node_flags
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
+  ceph osd rm-noup osd.0
+  ceph osd rm-nodown osd.0
+  ceph osd rm-noin osd.0
+  ceph osd rm-noout osd.0
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
+
+  ceph osd crush add-bucket foo host root=default
+  ceph osd add-noup foo
+  ceph osd add-nodown foo
+  ceph osd add-noin foo
+  ceph osd add-noout foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
+  ceph osd rm-noup foo
+  ceph osd rm-nodown foo
+  ceph osd rm-noin foo
+  ceph osd rm-noout foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
+  ceph osd add-noup foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
+  # removing the bucket must also drop its flags
+  ceph osd crush rm foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
+
+  # set-group/unset-group on a single osd: flags accumulate and clear
+  # exactly as requested.
+  ceph osd set-group noup osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+  ceph osd set-group noup,nodown osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+  ceph osd set-group noup,nodown,noin osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+  ceph osd set-group noup,nodown,noin,noout osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+  ceph osd unset-group noup osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+  ceph osd unset-group noup,nodown osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+  ceph osd unset-group noup,nodown,noin osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+  ceph osd unset-group noup,nodown,noin,noout osd.0
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+  # set-group/unset-group with multiple osd targets.
+  ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
+  ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
+  ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+  ceph osd set-group noup all
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+  ceph osd unset-group noup all
+  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
+
+  # crush node flags
+  ceph osd crush add-bucket foo host root=default
+  ceph osd set-group noup foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+  ceph osd set-group noup,nodown foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+  ceph osd set-group noup,nodown,noin foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+  ceph osd set-group noup,nodown,noin,noout foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+
+  ceph osd unset-group noup foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+  ceph osd unset-group noup,nodown foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+  ceph osd unset-group noup,nodown,noin foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+  ceph osd unset-group noup,nodown,noin,noout foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+  # clearing the last flags must remove the node's entry entirely
+  ceph osd set-group noin,noout foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+  ceph osd unset-group noin,noout foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
+
+  ceph osd set-group noup,nodown,noin,noout foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+  ceph osd crush rm foo
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
+
+  # test device class flags
+  osd_0_device_class=$(ceph osd crush get-device-class osd.0)
+  ceph osd set-group noup $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+  ceph osd set-group noup,nodown $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+  ceph osd set-group noup,nodown,noin $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+  ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+
+  ceph osd unset-group noup $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+  ceph osd unset-group noup,nodown $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+  ceph osd unset-group noup,nodown,noin $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+  ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+  ceph osd set-group noin,noout $osd_0_device_class
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+  ceph osd unset-group noin,noout $osd_0_device_class
+  # NOTE(review): this greps .crush_node_flags although the section tests
+  # device-class flags; .device_class_flags may have been intended — confirm.
+  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class
+
   # make sure mark out preserves weight
   ceph osd reweight osd.0 .5
   ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
   ceph osd in 0
   ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
-  f=$TEMP_DIR/map.$$
-  ceph osd getcrushmap -o $f
-  [ -s $f ]
-  ceph osd setcrushmap -i $f
-  rm $f
+  # NOTE(review): the removed hunk above deleted the "f=$TEMP_DIR/map.$$"
+  # assignment; confirm $f is still assigned earlier before this getmap.
   ceph osd getmap -o $f
   [ -s $f ]
   rm $f
   max_osd=$((max_osd + 1))
   ceph osd getmaxosd | grep "max_osd = $max_osd"
-  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EINVAL'
-  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EINVAL'
+  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
+  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
   id2=`ceph osd create $uuid`
   [ "$id" = "$id2" ]
   id2=`ceph osd create $uuid $id`
   max_osd=$((id + 1))
   ceph osd getmaxosd | grep "max_osd = $max_osd"
-  ceph osd create $uuid $gap_start 2>&1 | grep 'EINVAL'
+  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
   #
   # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
   # is repeated and consumes two osd id, not just one.
   #
-  local next_osd
-  if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
-    next_osd=$((gap_start + 1))
-  else
-    next_osd=$gap_start
-  fi
-  id=`ceph osd create`
-  [ "$id" = "$next_osd" ]
-
-  next_osd=$((id + 1))
+  local next_osd=$gap_start
   id=`ceph osd create $(uuidgen)`
   [ "$id" = "$next_osd" ]
   ceph osd setmaxosd $save
   ceph osd ls
-  ceph osd pool create data 10
+  ceph osd pool create data 16
+  ceph osd pool application enable data rados
   ceph osd lspools | grep data
   ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
   ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
   ceph osd unpause
   ceph osd tree
+  # tree filters: single states and compatible combinations are accepted,
+  # contradictory combinations (e.g. up+down) must be rejected.
+  ceph osd tree up
+  ceph osd tree down
+  ceph osd tree in
+  ceph osd tree out
+  ceph osd tree destroyed
+  ceph osd tree up in
+  ceph osd tree up out
+  ceph osd tree down in
+  ceph osd tree down out
+  ceph osd tree out down
+  expect_false ceph osd tree up down
+  expect_false ceph osd tree up destroyed
+  expect_false ceph osd tree down destroyed
+  expect_false ceph osd tree up down destroyed
+  expect_false ceph osd tree in out
+  expect_false ceph osd tree up foo
+
+  ceph osd metadata
+  ceph osd count-metadata os
+  ceph osd versions
+
   ceph osd perf
   ceph osd blocked-by
-  ceph osd stat | grep up,
+  ceph osd stat | grep up
+}
+
+# Round-trip the crushmap through getcrushmap/setcrushmap with explicit
+# epoch checks.
+function test_mon_crush()
+{
+  f=$TEMP_DIR/map.$$
+  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
+  [ -s $f ]
+  [ "$epoch" -gt 1 ]
+  nextepoch=$(( $epoch + 1 ))
+  echo epoch $epoch nextepoch $nextepoch
+  rm -f $f.epoch
+  # setcrushmap with a not-yet-reached epoch must be refused ...
+  expect_false ceph osd setcrushmap $nextepoch -i $f
+  # ... while setting with the current epoch advances the map to nextepoch.
+  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
+  echo gotepoch $gotepoch
+  [ "$gotepoch" -eq "$nextepoch" ]
+  # should be idempotent
+  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
+  echo epoch $gotepoch
+  [ "$gotepoch" -eq "$nextepoch" ]
+  rm $f
 }
 function test_mon_osd_pool()
   #
   # osd pool
   #
-  ceph osd pool create data 10
+  ceph osd pool create data 16
+  ceph osd pool application enable data rados
   ceph osd pool mksnap data datasnap
   rados -p data lssnap | grep datasnap
   ceph osd pool rmsnap data datasnap
   expect_false ceph osd pool rmsnap pool_fake snapshot
   ceph osd pool delete data data --yes-i-really-really-mean-it
-  ceph osd pool create data2 10
+  ceph osd pool create data2 16
+  ceph osd pool application enable data2 rados
   ceph osd pool rename data2 data3
   ceph osd lspools | grep data3
   ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
-  ceph osd pool create replicated 12 12 replicated
-  ceph osd pool create replicated 12 12 replicated
-  ceph osd pool create replicated 12 12 # default is replicated
-  ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
+  # re-creating an existing pool with a compatible spec is a no-op
+  ceph osd pool create replicated 16 16 replicated
+  ceph osd pool create replicated 1 16 replicated
+  ceph osd pool create replicated 16 16 # default is replicated
+  ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
+  ceph osd pool application enable replicated rados
   # should fail because the type is not the same
-  expect_false ceph osd pool create replicated 12 12 erasure
+  expect_false ceph osd pool create replicated 16 16 erasure
   ceph osd lspools | grep replicated
   ceph osd pool create ec_test 1 1 erasure
+  ceph osd pool application enable ec_test rados
   set +e
-  ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
-  if [ $? -eq 0 ]; then
+  # probe the object store type: ec overwrites need bluestore (see the
+  # check_response message below)
+  ceph osd count-metadata osd_objectstore | grep 'bluestore'
+  if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
     ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
-    check_response $? 22 "pool must only be stored on bluestore for scrubbing to work"
+    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
   else
     ceph osd pool set ec_test allow_ec_overwrites true || return 1
     expect_false ceph osd pool set ec_test allow_ec_overwrites false
   set -e
   ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
   ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
+
+  # test create pool with rule
+  ceph osd erasure-code-profile set foo foo
+  ceph osd erasure-code-profile ls | grep foo
+  ceph osd crush rule create-erasure foo foo
+  ceph osd pool create erasure 16 16 erasure foo
+  # a profile in use by a pool cannot be removed
+  expect_false ceph osd erasure-code-profile rm foo
+  ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
+  ceph osd crush rule rm foo
+  ceph osd erasure-code-profile rm foo
+
+  # autoscale mode
+  ceph osd pool create modeon --autoscale-mode=on
+  ceph osd dump | grep modeon | grep 'autoscale_mode on'
+  ceph osd pool create modewarn --autoscale-mode=warn
+  ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
+  ceph osd pool create modeoff --autoscale-mode=off
+  ceph osd dump | grep modeoff | grep 'autoscale_mode off'
+  ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
+  ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
+  ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
 }
 function test_mon_osd_pool_quota()
   #
   # create tmp pool
-  ceph osd pool create tmp-quota-pool 36
+  ceph osd pool create tmp-quota-pool 32
+  ceph osd pool application enable tmp-quota-pool rados
   #
   # set erroneous quotas
   #
+  # NOTE(review): no expect_false follows, so these quotas appear to be
+  # accepted; the "erroneous" context comment above may be stale — confirm.
   ceph osd pool set-quota tmp-quota-pool max_bytes 10
   ceph osd pool set-quota tmp-quota-pool max_objects 10M
   #
-  # get quotas
-  #
-  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
-  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
-  #
   # get quotas in json-pretty format
   #
+  # 10M now parses as SI 10,000,000 (not 10*2^20)
   ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
-      grep '"quota_max_objects":.*10485760'
+      grep '"quota_max_objects":.*10000000'
   ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
       grep '"quota_max_bytes":.*10'
   #
+  # get quotas
+  #
+  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
+  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
+  #
+  # set valid quotas with unit prefix
+  #
+  ceph osd pool set-quota tmp-quota-pool max_bytes 10K
+  #
+  # get quotas
+  #
+  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
+  #
+  # set valid quotas with unit prefix
+  #
+  ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
+  #
+  # get quotas
+  #
+  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
+  #
+  #
   # reset pool quotas
   #
   ceph osd pool set-quota tmp-quota-pool max_bytes 0
   ceph pg debug unfound_objects_exist
   ceph pg debug degraded_pgs_exist
-  ceph pg deep-scrub 0.0
+  ceph pg deep-scrub 1.0
   ceph pg dump
   ceph pg dump pgs_brief --format=json
   ceph pg dump pgs --format=json
   ceph pg dump_stuck undersized
   ceph pg dump_stuck degraded
   ceph pg ls
-  ceph pg ls 0
+  ceph pg ls 1
   ceph pg ls stale
   expect_false ceph pg ls scrubq
   ceph pg ls active stale repair recovering
-  ceph pg ls 0 active
-  ceph pg ls 0 active stale
+  ceph pg ls 1 active
+  ceph pg ls 1 active stale
   ceph pg ls-by-primary osd.0
-  ceph pg ls-by-primary osd.0 0
+  ceph pg ls-by-primary osd.0 1
   ceph pg ls-by-primary osd.0 active
   ceph pg ls-by-primary osd.0 active stale
-  ceph pg ls-by-primary osd.0 0 active stale
+  ceph pg ls-by-primary osd.0 1 active stale
   ceph pg ls-by-osd osd.0
-  ceph pg ls-by-osd osd.0 0
+  ceph pg ls-by-osd osd.0 1
   ceph pg ls-by-osd osd.0 active
   ceph pg ls-by-osd osd.0 active stale
-  ceph pg ls-by-osd osd.0 0 active stale
+  ceph pg ls-by-osd osd.0 1 active stale
   ceph pg ls-by-pool rbd
   ceph pg ls-by-pool rbd active stale
   # can't test this...
   # ceph pg force_create_pg
   ceph pg getmap -o $TEMP_DIR/map.$$
   [ -s $TEMP_DIR/map.$$ ]
-  ceph pg map 0.0 | grep acting
-  ceph pg repair 0.0
-  ceph pg scrub 0.0
+  ceph pg map 1.0 | grep acting
+  ceph pg repair 1.0
+  ceph pg scrub 1.0
   ceph osd set-full-ratio .962
   ceph osd dump | grep '^full_ratio 0.962'
   # Check health status
+  # deliberately misorder the full ratios and expect OSD_OUT_OF_ORDER_FULL
   ceph osd set-nearfull-ratio .913
-  ceph health | grep 'HEALTH_ERR.*Full ratio(s) out of order'
-  ceph health detail | grep 'backfillfull_ratio (0.912) < nearfull_ratio (0.913), increased'
+  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
+  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
   ceph osd set-nearfull-ratio .892
   ceph osd set-backfillfull-ratio .963
-  ceph health detail | grep 'full_ratio (0.962) < backfillfull_ratio (0.963), increased'
+  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
+  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
   ceph osd set-backfillfull-ratio .912
   # Check injected full results
-  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
-  wait_for_health "HEALTH_WARN.*1 nearfull osd(s)"
-  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
-  wait_for_health "HEALTH_WARN.*1 backfillfull osd(s)"
-  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
+  # (now via "ceph tell osd.N injectfull"; each state is cleared with
+  # "injectfull none" and health must return to OK before the next case)
+  $SUDO ceph tell osd.0 injectfull nearfull
+  wait_for_health "OSD_NEARFULL"
+  ceph health detail | grep "osd.0 is near full"
+  $SUDO ceph tell osd.0 injectfull none
+  wait_for_health_ok
+
+  $SUDO ceph tell osd.1 injectfull backfillfull
+  wait_for_health "OSD_BACKFILLFULL"
+  ceph health detail | grep "osd.1 is backfill full"
+  $SUDO ceph tell osd.1 injectfull none
+  wait_for_health_ok
+
+  $SUDO ceph tell osd.2 injectfull failsafe
   # failsafe and full are the same as far as the monitor is concerned
-  wait_for_health "HEALTH_ERR.*1 full osd(s)"
-  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
-  wait_for_health "HEALTH_ERR.*2 full osd(s)"
-  ceph health detail | grep "osd.0 is full at.*%"
-  ceph health detail | grep "osd.2 is full at.*%"
-  ceph health detail | grep "osd.1 is backfill full at.*%"
-  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
-  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
-  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
+  wait_for_health "OSD_FULL"
+  ceph health detail | grep "osd.2 is full"
+  $SUDO ceph tell osd.2 injectfull none
+  wait_for_health_ok
+
+  $SUDO ceph tell osd.0 injectfull full
+  wait_for_health "OSD_FULL"
+  ceph health detail | grep "osd.0 is full"
+  $SUDO ceph tell osd.0 injectfull none
   wait_for_health_ok
   ceph pg stat | grep 'pgs:'
-  ceph pg 0.0 query
-  ceph tell 0.0 query
-  ceph quorum enter
+  ceph pg 1.0 query
+  ceph tell 1.0 query
+  first=$(ceph mon dump -f json | jq -r '.mons[0].name')
+  ceph tell mon.$first quorum enter
   ceph quorum_status
   ceph report | grep osd_stats
   ceph status
   expect_false ceph osd primary-affinity osd.9999 .5
   ceph osd primary-affinity osd.0 1
-  ceph osd pg-temp 0.0 0 1 2
-  ceph osd pg-temp 0.0 osd.1 osd.0 osd.2
+  # pg-temp: acting set size must match the pool size (2 here)
+  ceph osd pool set rbd size 2
+  ceph osd pg-temp 1.0 0 1
+  ceph osd pg-temp 1.0 osd.1 osd.0
+  expect_false ceph osd pg-temp 1.0 0 1 2
   expect_false ceph osd pg-temp asdf qwer
-  expect_false ceph osd pg-temp 0.0 asdf
-  expect_false ceph osd pg-temp 0.0
+  expect_false ceph osd pg-temp 1.0 asdf
+  ceph osd pg-temp 1.0 # cleanup pg-temp
+
+  ceph pg repeer 1.0
+  expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
   # don't test ceph osd primary-temp for now
 }
function test_mon_osd_pool_set()
{
TEST_POOL_GETSET=pool_getset
- ceph osd pool create $TEST_POOL_GETSET 1
+ expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
+ expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
+ ceph osd pool application enable $TEST_POOL_GETSET rados
+ ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
wait_for_clean
ceph osd pool get $TEST_POOL_GETSET all
- for s in pg_num pgp_num size min_size crush_rule crush_ruleset; do
+ for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
ceph osd pool get $TEST_POOL_GETSET $s
done
old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
(( new_size = old_size + 1 ))
- ceph osd pool set $TEST_POOL_GETSET size $new_size
+ ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
- ceph osd pool set $TEST_POOL_GETSET size $old_size
+ ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it
ceph osd pool create pool_erasure 1 1 erasure
+ ceph osd pool application enable pool_erasure rados
wait_for_clean
set +e
ceph osd pool set pool_erasure size 4444 2>$TMPFILE
check_response 'not change the size'
set -e
ceph osd pool get pool_erasure erasure_code_profile
+ ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
- auid=5555
- ceph osd pool set $TEST_POOL_GETSET auid $auid
- ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
- ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
- ceph osd pool set $TEST_POOL_GETSET auid 0
-
- for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
+ for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub bulk; do
ceph osd pool set $TEST_POOL_GETSET $flag false
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
ceph osd pool set $TEST_POOL_GETSET $flag true
ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
+ ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
+ ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
+ expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
+ expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
+ expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
+ expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
+ expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
+ expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
+ ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'
+
ceph osd pool set $TEST_POOL_GETSET nopgchange 1
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
ceph osd pool set $TEST_POOL_GETSET pg_num 10
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET pgp_num 10
+ expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
+ expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
- new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32))
+ new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
wait_for_clean
- old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
- new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32+1))
- expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
ceph osd pool set $TEST_POOL_GETSET nosizechange 1
expect_false ceph osd pool set $TEST_POOL_GETSET size 2
expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
+ ceph osd pool get rbd crush_rule | grep 'crush_rule: '
+
+ ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
+ ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
+ ceph osd pool set $TEST_POOL_GETSET compression_mode unset
+ ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
+ ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
+ ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
+ ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
+ expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
+ expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
+ ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
+ ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
+ ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
+ ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
+ ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
+ ceph osd pool set $TEST_POOL_GETSET csum_type unset
+ ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
+
+ for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
+ ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET $size 100
+ ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
+ ceph osd pool set $TEST_POOL_GETSET $size 0
+ ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
+ done
+
ceph osd pool set $TEST_POOL_GETSET nodelete 1
expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
ceph osd pool set $TEST_POOL_GETSET nodelete 0
ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
- ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
- ceph osd pool get rbd crush_rule | grep 'crush_rule: '
}
function test_mon_osd_tiered_pool_set()
ceph osd pool create real-tier 2
ceph osd tier add rbd real-tier
+ # expect us to be unable to set negative values for hit_set_*
+ for o in hit_set_period hit_set_count hit_set_fpp; do
+ expect_false ceph osd pool set real-tier $o -1
+ done
+
+ # and hit_set_fpp should be in range 0..1
+ expect_false ceph osd pool set real-tier hit_set_fpp 2
+
ceph osd pool set real-tier hit_set_type explicit_hash
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
ceph osd pool set real-tier hit_set_type explicit_object
ceph osd pool get real-tier cache_min_evict_age | \
grep 'cache_min_evict_age:[ \t]\+234'
+ # iec vs si units
+ ceph osd pool set real-tier target_max_objects 1K
+ ceph osd pool get real-tier target_max_objects | grep 1000
+ for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
+ ceph osd pool set real-tier $o 1Ki # with i suffix
+ val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
+ [[ $val == 1024 ]]
+ ceph osd pool set real-tier $o 1M # no i suffix
+ val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
+ [[ $val == 1048576 ]]
+ done
+
# this is not a tier pool
ceph osd pool create fake-tier 2
+ ceph osd pool application enable fake-tier rados
wait_for_clean
expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
ceph osd erasure-code-profile set fooprofile a=b c=d e=f
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
- #
- # cleanup by removing profile 'fooprofile'
+ # make sure the legacy ruleset-* option names are rejected (renamed to crush-*)
+ expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
+ ceph osd erasure-code-profile set barprofile crush-failure-domain=host
+ # clean up
ceph osd erasure-code-profile rm fooprofile
+ ceph osd erasure-code-profile rm barprofile
+
+ # try weird k and m values
+ expect_false ceph osd erasure-code-profile set badk k=1 m=1
+ expect_false ceph osd erasure-code-profile set badk k=1 m=2
+ expect_false ceph osd erasure-code-profile set badk k=0 m=2
+ expect_false ceph osd erasure-code-profile set badk k=-1 m=2
+ expect_false ceph osd erasure-code-profile set badm k=2 m=0
+ expect_false ceph osd erasure-code-profile set badm k=2 m=-1
+ ceph osd erasure-code-profile set good k=2 m=1
+ ceph osd erasure-code-profile rm good
}
function test_mon_osd_misc()
# expect error about unused argument foo
ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
- # expect "not in range" for invalid full ratio
- ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
-
# expect "not in range" for invalid overload percentage
ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
set -e
+ local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
+ local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
+ # otherwise ceph-mgr complains like:
+ # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
+ # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
+ ceph config set mgr mon_reweight_min_bytes_per_osd 0
+ ceph config set mgr mon_reweight_min_pgs_per_osd 0
ceph osd reweight-by-utilization 110
ceph osd reweight-by-utilization 110 .5
expect_false ceph osd reweight-by-utilization 110 0
ceph osd reweight-by-pg 110 rbd
ceph osd reweight-by-pg 110 .5 rbd
expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
-}
-
-function test_mon_heap_profiler()
-{
- do_test=1
- set +e
- # expect 'heap' commands to be correctly parsed
- ceph heap stats 2>$TMPFILE
- if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
- echo "tcmalloc not enabled; skip heap profiler test"
- do_test=0
- fi
- set -e
-
- [[ $do_test -eq 0 ]] && return 0
-
- ceph heap start_profiler
- ceph heap dump
- ceph heap stop_profiler
- ceph heap release
+ # restore the setting
+ ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
+ ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
}
function test_admin_heap_profiler()
do_test=1
set +e
# expect 'heap' commands to be correctly parsed
- ceph heap stats 2>$TMPFILE
+ ceph tell osd.0 heap stats 2>$TMPFILE
if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
echo "tcmalloc not enabled; skip heap profiler test"
do_test=0
[[ $do_test -eq 0 ]] && return 0
- local admin_socket=$(get_admin_socket osd.0)
-
- $SUDO ceph --admin-daemon $admin_socket heap start_profiler
- $SUDO ceph --admin-daemon $admin_socket heap dump
- $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
- $SUDO ceph --admin-daemon $admin_socket heap release
+ $SUDO ceph tell osd.0 heap start_profiler
+ $SUDO ceph tell osd.0 heap dump
+ $SUDO ceph tell osd.0 heap stop_profiler
+ $SUDO ceph tell osd.0 heap release
}
function test_osd_bench()
function test_mon_tell()
{
- ceph tell mon.a version
- ceph tell mon.b version
+ for m in mon.a mon.b; do
+ ceph tell $m sessions
+ ceph_watch_start debug audit
+ ceph tell $m sessions
+ ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
+ done
expect_false ceph tell mon.foo version
-
- sleep 1
-
- ceph_watch_start debug
- ceph tell mon.a version
- ceph_watch_wait 'mon.0 \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
-
- ceph_watch_start debug
- ceph tell mon.b version
- ceph_watch_wait 'mon.1 \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
-}
-
-function test_mon_crushmap_validation()
-{
- local map=$TEMP_DIR/map
- ceph osd getcrushmap -o $map
-
- local crushtool_path="${TEMP_DIR}/crushtool"
- touch "${crushtool_path}"
- chmod +x "${crushtool_path}"
- local crushtool_path_old=`ceph-conf --show-config-value crushtool`
- ceph tell mon.\* injectargs --crushtool "${crushtool_path}"
-
- printf "%s\n" \
- "#!/bin/sh
- cat > /dev/null
- exit 0" > "${crushtool_path}"
-
- ceph osd setcrushmap -i $map
-
- printf "%s\n" \
- "#!/bin/sh
- cat > /dev/null
- exit 1" > "${crushtool_path}"
-
- expect_false ceph osd setcrushmap -i $map
-
- printf "%s\n" \
- "#!/bin/sh
- cat > /dev/null
- echo 'TEST FAIL' >&2
- exit 1" > "${crushtool_path}"
-
- expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
- check_response "Error EINVAL: Failed crushmap test: TEST FAIL"
-
- local mon_lease=`ceph-conf --show-config-value mon_lease`
-
- test "${mon_lease}" -gt 0
-
- printf "%s\n" \
- "#!/bin/sh
- cat > /dev/null
- sleep $((mon_lease - 1))" > "${crushtool_path}"
-
- ceph osd setcrushmap -i $map
-
- printf "%s\n" \
- "#!/bin/sh
- cat > /dev/null
- sleep $((mon_lease + 1))" > "${crushtool_path}"
-
- expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
- check_response "Error EINVAL: Failed crushmap test: ${crushtool_path}: timed out (${mon_lease} sec)"
-
- ceph tell mon.\* injectargs --crushtool "${crushtool_path_old}"
-
- rm -f "${crushtool_path}"
}
function test_mon_ping()
function test_mon_deprecated_commands()
{
- # current DEPRECATED commands are:
- # ceph compact
- # ceph scrub
- # ceph sync force
+ # current DEPRECATED commands are marked with FLAG(DEPRECATED)
#
# Testing should be accomplished by setting
# 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
# each one of these commands.
- ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
- expect_false ceph tell mon.a compact 2> $TMPFILE
+ ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
+ expect_false ceph config-key list 2> $TMPFILE
check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
- expect_false ceph tell mon.a scrub 2> $TMPFILE
- check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
-
- expect_false ceph tell mon.a sync force 2> $TMPFILE
- check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
-
- ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
+ ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
}
function test_mon_cephdf_commands()
# pool section:
# RAW USED The near raw used per pool in raw total
- ceph osd pool create cephdf_for_test 32 32 replicated
+ ceph osd pool create cephdf_for_test 1 1 replicated
+ ceph osd pool application enable cephdf_for_test rados
ceph osd pool set cephdf_for_test size 2
dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
sleep 1
done
-
- cal_raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{printf "%d\n", 2 * $3}'`
- raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{print $10}'`
+ # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
+ # to sync mon with osd
+ flush_pg_stats
+ local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
+ stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
+ stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
rm ./cephdf_for_test
- expect_false test $cal_raw_used_size != $raw_used_size
+ expect_false test $stored != $stored_raw
+}
+
+function test_mon_pool_application()
+{
+ # Exercise "osd pool application" enable/disable/set/get/rm and verify the
+ # pool's application_metadata through "ceph osd pool ls detail".
+ ceph osd pool create app_for_test 16
+
+ ceph osd pool application enable app_for_test rbd
+ # enabling a second application requires an explicit confirmation flag
+ expect_false ceph osd pool application enable app_for_test rgw
+ ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
+ ceph osd pool ls detail | grep "application rbd,rgw"
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
+
+ # key/value metadata may only be set under an enabled application
+ expect_false ceph osd pool application set app_for_test cephfs key value
+ ceph osd pool application set app_for_test rbd key1 value1
+ ceph osd pool application set app_for_test rbd key2 value2
+ ceph osd pool application set app_for_test rgw key1 value1
+ ceph osd pool application get app_for_test rbd key1 | grep 'value1'
+ ceph osd pool application get app_for_test rbd key2 | grep 'value2'
+ ceph osd pool application get app_for_test rgw key1 | grep 'value1'
+
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
+
+ ceph osd pool application rm app_for_test rgw key1
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
+ ceph osd pool application rm app_for_test rbd key2
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
+ ceph osd pool application rm app_for_test rbd key1
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
+ ceph osd pool application rm app_for_test rbd key1 # should be idempotent
+
+ # disabling also requires the confirmation flag
+ expect_false ceph osd pool application disable app_for_test rgw
+ ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
+ ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
+ ceph osd pool ls detail | grep "application rbd"
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
+
+ # BUG FIX: disable the remaining "rbd" application here (the original
+ # disabled the already-disabled "rgw" a third time, which could never
+ # produce the empty application_metadata asserted below)
+ ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
+ ceph osd pool ls detail | grep -v "application "
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
+
+ ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
+}
+
+function test_mon_tell_help_command()
+{
+ # Both "help" and "-h" against a live mon must list mon-only commands.
+ for opt in help -h; do
+ ceph tell mon.a $opt | grep sync_force
+ done
+ ceph tell mon.a config -h | grep 'config diff get'
+
+ # a nonexistent mon target must be rejected
+ expect_false ceph tell mon.zzz help
+}
+
+function test_mon_stdin_stdout()
+{
+ # "-i -" reads the config-key value from stdin; "-o -" writes it to stdout.
+ echo foo | ceph config-key set test_key -i -
+ # grep -c prints the match count; require it to be exactly "1".
+ # (the original "grep -q 1" also matched 10, 21, 100, ...)
+ ceph config-key get test_key -o - | grep -c foo | grep -qx 1
+}
+
+function test_osd_tell_help_command()
+{
+ # "tell ... help" must succeed for a live osd and fail for a nonexistent id
+ ceph tell osd.1 help
+ expect_false ceph tell osd.100 help
+}
+
+function test_osd_compact()
+{
+ # trigger on-demand store compaction both via "tell" and the admin socket
+ ceph tell osd.1 compact
+ $SUDO ceph daemon osd.1 compact
+}
+
+function test_mds_tell_help_command()
+{
+ # Verify "ceph tell mds.<id> help" works for an active MDS and fails for
+ # an unknown one.  Skipped when the cluster runs no MDS daemon.
+ local FS_NAME=cephfs
+ if ! mds_exists ; then
+ echo "Skipping test, no MDS found"
+ return
+ fi
+
+ # build a throwaway filesystem so mds.a becomes active
+ remove_all_fs
+ ceph osd pool create fs_data 16
+ ceph osd pool create fs_metadata 16
+ ceph fs new $FS_NAME fs_metadata fs_data
+ wait_mds_active $FS_NAME
+
+
+ ceph tell mds.a help
+ expect_false ceph tell mds.z help
+
+ # tear down the filesystem and its pools
+ remove_all_fs
+ ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
+ ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
+}
+
+function test_mgr_tell()
+{
+ # smoke test: "tell" with a bare "mgr" target reaches the active mgr
+ ceph tell mgr version
+}
+
+function test_mgr_devices()
+{
+ # mgr-served device tracking commands
+ ceph device ls
+ # unknown device ids must be rejected
+ expect_false ceph device info doesnotexist
+ expect_false ceph device get-health-metrics doesnotexist
+}
+
+function test_per_pool_scrub_status()
+{
+ # The POOL_SCRUB_FLAGS health warning should appear only once a pool has a
+ # noscrub and/or nodeep-scrub flag set, and must name the affected pool(s).
+ ceph osd pool create noscrub_pool 16
+ ceph osd pool create noscrub_pool2 16
+ ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
+ ceph -s --format json | \
+ jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
+ expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail |
+ expect_false grep -q "Pool .* has .*scrub.* flag"
+ # BUG FIX: "ceph health detail" emits plain text, so piping it through jq
+ # always produced empty output and this check passed vacuously; grep the
+ # text directly, matching how the positive cases below are checked.
+ ceph health detail | \
+ expect_false grep -q "Pool .* has .*scrub.* flag"
+
+ ceph osd pool set noscrub_pool noscrub 1
+ ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
+ ceph -s --format json | \
+ jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
+ expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool has noscrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
+
+ ceph osd pool set noscrub_pool nodeep-scrub 1
+ ceph osd pool set noscrub_pool2 nodeep-scrub 1
+ ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
+ ceph -s --format json | \
+ jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
+ expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool has noscrub flag"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
+
+ # clean up the test pools
+ ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
+ ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
}
#
set +x
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
-MON_TESTS+=" tiering"
+for i in `seq 9`; do
+ MON_TESTS+=" tiering_$i";
+done
MON_TESTS+=" auth"
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
+MON_TESTS+=" mon_config_key"
+MON_TESTS+=" mon_crush"
+MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
-MON_TESTS+=" mon_heap_profiler"
MON_TESTS+=" mon_tell"
-MON_TESTS+=" mon_crushmap_validation"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
+MON_TESTS+=" mon_tell_help_command"
+MON_TESTS+=" mon_stdin_stdout"
+
OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
+OSD_TESTS+=" osd_tell_help_command"
+OSD_TESTS+=" osd_compact"
+OSD_TESTS+=" per_pool_scrub_status"
MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
+MDS_TESTS+=" mds_tell_help_command"
+
+MGR_TESTS+=" mgr_tell"
+MGR_TESTS+=" mgr_devices"
TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
+TESTS+=$MGR_TESTS
#
# "main" follows
"--test-mds" )
tests_to_run+="$MDS_TESTS"
;;
+ "--test-mgr" )
+ tests_to_run+="$MGR_TESTS"
+ ;;
"-t" )
shift
if [[ -z "$1" ]]; then
exit 0
fi
+ceph osd pool create rbd 16
+
if test -z "$tests_to_run" ; then
tests_to_run="$TESTS"
fi