]> git.proxmox.com Git - ceph.git/blame - ceph/qa/workunits/fs/full/subvolume_snapshot_rm.sh
import ceph quincy 17.2.4
[ceph.git] / ceph / qa / workunits / fs / full / subvolume_snapshot_rm.sh
CommitLineData
2a845540
TL
1#!/usr/bin/env bash
2set -ex
3
4# This testcase tests the 'ceph fs subvolume snapshot rm' when the osd is full.
5# The snapshot rm fails with 'MetadataMgrException: -28 (error in write)' and
6# truncates the config file of corresponding subvolume. Hence the subsequent
7# snapshot rm of the same snapshot fails with 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)'
8# traceback.
9
10# The osd is of the size 1GB. The subvolume is created and 800MB file is written.
11# Then full-ratios are set below 500MB such that the osd is treated as full.
12# The subvolume snapshot is taken which succeeds as no extra space is required
13# for snapshot. Now, the removal of the snapshot fails with ENOSPACE as it
14# fails to remove the snapshot metadata set. The snapshot removal fails
15# but should not traceback and truncate the config file.
16
17set -e
18expect_failure() {
19 if "$@"; then return 1; else return 0; fi
20}
21
22ignore_failure() {
23 if "$@"; then return 0; else return 0; fi
24}
25
26ceph fs subvolume create cephfs sub_0
27subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
28
29#For debugging
30echo "Before write"
31df $CEPH_MNT
32ceph osd df
33
34# Write 800MB file and set full ratio to around 200MB
35ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/800MB_file-1 status=progress bs=1M count=800 conv=fdatasync
36
37ceph osd set-full-ratio 0.2
38ceph osd set-nearfull-ratio 0.16
39ceph osd set-backfillfull-ratio 0.18
40
41timeout=30
42while [ $timeout -gt 0 ]
43do
44 health=$(ceph health detail)
45 [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
46 echo "Wating for osd to be full: $timeout"
47 sleep 1
48 let "timeout-=1"
49done
50
51#Take snapshot
52ceph fs subvolume snapshot create cephfs sub_0 snap_0
53
54#Remove snapshot fails but should not throw traceback
55expect_failure ceph fs subvolume snapshot rm cephfs sub_0 snap_0 2>/tmp/error_${PID}_file
56cat /tmp/error_${PID}_file
57
58# No traceback should be found
59expect_failure grep "Traceback" /tmp/error_${PID}_file
60
61# Validate config file is not truncated and GLOBAL section exists
62sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta
63
64#For debugging
65echo "After write"
66df $CEPH_MNT
67ceph osd df
68
69# Snapshot removal with force option should succeed
70ceph fs subvolume snapshot rm cephfs sub_0 snap_0 --force
71
72#Cleanup from backend
73ignore_failure sudo rm -f /tmp/error_${PID}_file
74ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
75
76#Set the ratios back for other full tests to run
77ceph osd set-full-ratio 0.95
78ceph osd set-nearfull-ratio 0.95
79ceph osd set-backfillfull-ratio 0.95
80
81#After test
82echo "After test"
83df -h $CEPH_MNT
84ceph osd df
85
86echo OK