tasks:
- cephadm.shell:
    env: [sha1]
    mon.a:
      - radosgw-admin realm create --rgw-realm=r --default
      - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
      - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
      - radosgw-admin period update --rgw-realm=r --commit
      - ceph orch apply rgw r z --placement=2 --port=8000
      - sleep 180
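      # (note) the realm/zonegroup/zone created above back an rgw service that cephadm
      # names rgw.r.z, which is the service the --services upgrade step near the end of
      # this file targets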
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
      - ceph config set global log_to_journald false --force
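      # (note) these overrides are presumably here to keep the test cluster healthy and quiet
      # while mixed versions are running: the two mon options silence the insecure global_id
      # reclaim health warnings, and log_to_journald false keeps daemon logs out of journald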
      # get some good info on the state of things pre-upgrade. Useful for debugging
      - ceph orch ps
      - ceph versions
      - ceph -s
      - ceph orch ls
      # a staggered upgrade requires the mgr daemons to be on a version that contains the staggered upgrade code
      # until there is a stable version that contains it, we can test by manually upgrading a mgr daemon
      - ceph config set mgr container_image quay.ceph.io/ceph-ci/ceph:$sha1
      - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)"
      - ceph orch ps --refresh
      - sleep 180
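      # (note) the jq pipeline above pulls a standby mgr's name out of 'ceph mgr dump',
      # so the redeploy targets a standby rather than the active mgr; with the mgr
      # container_image option set above, the redeployed daemon comes up on the new image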
      # gather more possible debugging info
      - ceph orch ps
      - ceph versions
      - ceph -s
      # check that there are two different versions found for the mgr daemons (which implies we upgraded one)
      - ceph versions | jq -e '.mgr | length == 2'
      - ceph mgr fail
      - sleep 180
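      # (note) 'ceph mgr fail' forces a failover, so the mgr just redeployed on the new
      # image should become active and the remaining old mgr drops to standby, where the
      # next redeploy below can pick it up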
      # now try upgrading the other mgr
      # we should now have access to the --image flag for the daemon redeploy command
      - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
      - ceph orch ps --refresh
      - sleep 180
      # gather more possible debugging info
      - ceph orch ps
      - ceph versions
      - ceph -s
      - ceph mgr fail
      - sleep 180
      # gather more debugging info
      - ceph orch ps
      - ceph versions
      - ceph -s
      # now that both mgrs should have been redeployed with the new version, we should be back to only 1 version for the mgrs
      - ceph versions | jq -e '.mgr | length == 1'
      - ceph mgr fail
      - sleep 180
      # debugging info
      - ceph orch ps
      - ceph versions
      # to make sure the mgr daemon upgrade is fully completed, including being deployed by a mgr on the new version
      # also serves as an early failure if manually upgrading the mgrs failed, as --daemon-types won't be recognized
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
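      # (note) this polling loop (repeated after each staggered step below) dumps daemon
      # and version info every 30 seconds and exits once the upgrade is no longer in
      # progress or an Error appears in the upgrade status message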
      # verify only one version found for mgrs and that their version hash matches what we are upgrading to
      - ceph versions | jq -e '.mgr | length == 1'
      - ceph versions | jq -e '.mgr | keys' | grep $sha1
      # verify overall we still see two versions, basically to make sure --daemon-types wasn't ignored and all daemons upgraded anyway
      - ceph versions | jq -e '.overall | length == 2'
      # check that exactly two daemons have been upgraded to the new image (our 2 mgr daemons)
      - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2'
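      # (note) 'ceph orch upgrade check' compares running daemons against the given image;
      # checking the length of its up_to_date list is how this test counts exactly which
      # daemons each staggered step has moved to the new image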
      # upgrade only the mons on one of the two hosts
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '{print $2}')
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - ceph orch ps
      # verify two different versions seen for mons
      - ceph versions | jq -e '.mon | length == 2'
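      # (note) the --hosts value is the host mgr.x runs on (second column of 'ceph orch ps'),
      # so only the mon daemons on that one host were upgraded, leaving two mon versions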
      # upgrade the mons on the other host
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '{print $2}')
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - ceph orch ps
      # verify all mons are now on the same version and the version hash matches what we are upgrading to
      - ceph versions | jq -e '.mon | length == 1'
      - ceph versions | jq -e '.mon | keys' | grep $sha1
      # verify exactly 5 daemons are now upgraded (2 mgrs, 3 mons)
      - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 5'
      # upgrade exactly 2 osd daemons
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - ceph orch ps
      # verify two different versions now seen for osds
      - ceph versions | jq -e '.osd | length == 2'
      # verify exactly 7 daemons have been upgraded (2 mgrs, 3 mons, 2 osds)
      - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7'
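      # (note) --limit caps how many daemons of the filtered types are upgraded in one run,
      # which is why the up_to_date count is expected to advance by exactly two osds here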
      # upgrade one more osd
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - ceph orch ps
      - ceph versions | jq -e '.osd | length == 2'
      # verify now 8 daemons have been upgraded
      - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 8'
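      # (note) adding crash to --daemon-types makes the crash collector daemons eligible too,
      # but with --limit 1 only a single daemon should move, so the up_to_date count only
      # advances from 7 to 8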
      # upgrade the rest of the osds
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - ceph orch ps
      # verify all osds are now on same version and version hash matches what we are upgrading to
      - ceph versions | jq -e '.osd | length == 1'
      - ceph versions | jq -e '.osd | keys' | grep $sha1
      # upgrade the rgw daemons using --services
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.r.z
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - ceph orch ps
      # verify all rgw daemons on same version and version hash matches what we are upgrading to
      - ceph versions | jq -e '.rgw | length == 1'
      - ceph versions | jq -e '.rgw | keys' | grep $sha1
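      # (note) --services filters the upgrade by service name (the rgw.r.z service deployed
      # at the top of this file) instead of by daemon type, exercising the other
      # staggered-upgrade filter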
      # run upgrade one more time with no filter parameters to make sure anything left gets upgraded
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
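      # (note) with no filter parameters this behaves like a normal full upgrade, picking up
      # any daemons the earlier filtered steps left behind so the whole cluster ends up on
      # quay.ceph.io/ceph-ci/ceph:$sha1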