]> git.proxmox.com Git - ceph.git/blob - ceph/src/test/rbd_mirror/test_ClusterWatcher.cc
update ceph source to reef 18.2.1
[ceph.git] / ceph / src / test / rbd_mirror / test_ClusterWatcher.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 #include "include/rados/librados.hpp"
4 #include "common/Cond.h"
5 #include "common/errno.h"
6 #include "common/ceph_mutex.h"
7 #include "librbd/internal.h"
8 #include "librbd/api/Mirror.h"
9 #include "tools/rbd_mirror/ClusterWatcher.h"
10 #include "tools/rbd_mirror/ServiceDaemon.h"
11 #include "tools/rbd_mirror/Types.h"
12 #include "test/rbd_mirror/test_fixture.h"
13 #include "test/librados/test_cxx.h"
14 #include "test/librbd/test_support.h"
15 #include "gtest/gtest.h"
16 #include <boost/scope_exit.hpp>
17 #include <iostream>
18 #include <map>
19 #include <memory>
20 #include <set>
21
22 using rbd::mirror::ClusterWatcher;
23 using rbd::mirror::PeerSpec;
24 using rbd::mirror::RadosRef;
25 using std::map;
26 using std::set;
27 using std::string;
28
// Registration hook for this test translation unit; intentionally empty.
// NOTE(review): presumably called by the test harness's main() so the
// linker keeps this object file's TEST_F registrations — confirm against
// the rbd_mirror test main.
void register_test_cluster_watcher() {
}
31
// Fixture for ClusterWatcher tests: owns a live Rados connection, creates
// temporary pools (optionally with mirroring + a peer site configured), and
// checks that ClusterWatcher::refresh_pools() discovers exactly the expected
// pool -> peer mapping.
class TestClusterWatcher : public ::rbd::mirror::TestFixture {
public:

  TestClusterWatcher() {
    m_cluster = std::make_shared<librados::Rados>();
    // connect_cluster_pp returns an error string; empty string == success.
    EXPECT_EQ("", connect_cluster_pp(*m_cluster));
  }

  ~TestClusterWatcher() override {
    // Wait for pool creations/deletions from the test body to be reflected
    // in the local osdmap before deleting the leftover temp pools.
    m_cluster->wait_for_latest_osdmap();
    for (auto& pool : m_pools) {
      EXPECT_EQ(0, m_cluster->pool_delete(pool.c_str()));
    }
  }

  void SetUp() override {
    TestFixture::SetUp();
    m_service_daemon.reset(new rbd::mirror::ServiceDaemon<>(g_ceph_context,
                                                            m_cluster,
                                                            m_threads));
    // The watcher keeps a raw pointer to the service daemon and shares
    // m_lock with the test (see check_peers()).
    m_cluster_watcher.reset(new ClusterWatcher(m_cluster, m_lock,
                                               m_service_daemon.get()));
  }

  void TearDown() override {
    // NOTE(review): the service daemon is destroyed before the watcher that
    // holds a raw pointer to it; appears safe because nothing here uses the
    // watcher afterwards, but confirm ClusterWatcher's destructor does not
    // touch the daemon.
    m_service_daemon.reset();
    m_cluster_watcher.reset();
    TestFixture::TearDown();
  }

  // Create a uniquely-named temp pool (tracked in m_pools for cleanup).
  // If enable_mirroring is set, switch the pool to pool-level mirroring,
  // add `peer` as an RX_TX peer site, and record it in m_pool_peers (the
  // expected state that check_peers() compares against).
  // Optionally returns the generated peer uuid and/or the pool name.
  void create_pool(bool enable_mirroring, const PeerSpec &peer,
                   string *uuid = nullptr, string *name=nullptr) {
    string pool_name = get_temp_pool_name("test-rbd-mirror-");
    ASSERT_EQ(0, m_cluster->pool_create(pool_name.c_str()));

    int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str());
    ASSERT_GE(pool_id, 0);

    librados::IoCtx ioctx;
    ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx));
    // Tag the pool for the rbd application so mirroring commands accept it.
    ioctx.application_enable("rbd", true);

    m_pools.insert(pool_name);
    if (enable_mirroring) {
      ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(ioctx,
                                                   RBD_MIRROR_MODE_POOL));

      // peer_site_add writes the generated uuid through its out-param;
      // fall back to a scratch string when the caller doesn't want it.
      std::string gen_uuid;
      ASSERT_EQ(0, librbd::api::Mirror<>::peer_site_add(
        ioctx, uuid != nullptr ? uuid : &gen_uuid,
        RBD_MIRROR_PEER_DIRECTION_RX_TX,
        peer.cluster_name, peer.client_name));
      m_pool_peers[pool_id].insert(peer);
    }
    if (name != nullptr) {
      *name = pool_name;
    }
  }

  // Delete a pool and drop `peer` from the expected pool->peers map,
  // erasing the pool entry entirely once its last peer is gone.
  void delete_pool(const string &name, const PeerSpec &peer) {
    int64_t pool_id = m_cluster->pool_lookup(name.c_str());
    ASSERT_GE(pool_id, 0);
    if (m_pool_peers.find(pool_id) != m_pool_peers.end()) {
      m_pool_peers[pool_id].erase(peer);
      if (m_pool_peers[pool_id].empty()) {
        m_pool_peers.erase(pool_id);
      }
    }
    m_pools.erase(name);
    ASSERT_EQ(0, m_cluster->pool_delete(name.c_str()));
  }

  // Store the peer's mon_host/key under the rbd-mirror peer config-key
  // (RBD_MIRROR_PEER_CONFIG_KEY_PREFIX<pool_id>/<peer uuid>) via a mon
  // "config-key set" command.
  void set_peer_config_key(const std::string& pool_name,
                           const PeerSpec &peer) {
    int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str());
    ASSERT_GE(pool_id, 0);

    // The \\\" sequences produce literal \" in `json`, so that when `json`
    // is spliced into the outer mon-command JSON below it becomes a
    // correctly escaped string value for "val".
    std::string json =
      "{"
        "\\\"mon_host\\\": \\\"" + peer.mon_host + "\\\", "
        "\\\"key\\\": \\\"" + peer.key + "\\\""
      "}";

    bufferlist in_bl;
    ASSERT_EQ(0, m_cluster->mon_command(
      "{"
        "\"prefix\": \"config-key set\","
        "\"key\": \"" RBD_MIRROR_PEER_CONFIG_KEY_PREFIX + stringify(pool_id) +
          "/" + peer.uuid + "\","
        "\"val\": \"" + json + "\"" +
      "}", in_bl, nullptr, nullptr));
  }

  // Layer a writeback cache tier on top of base_pool; the cache pool is NOT
  // added to m_pools, so callers must pair this with remove_cache_pool().
  void create_cache_pool(const string &base_pool, string *cache_pool_name) {
    bufferlist inbl;
    *cache_pool_name = get_temp_pool_name("test-rbd-mirror-");
    ASSERT_EQ(0, m_cluster->pool_create(cache_pool_name->c_str()));

    ASSERT_EQ(0, m_cluster->mon_command(
      "{\"prefix\": \"osd tier add\", \"pool\": \"" + base_pool +
      "\", \"tierpool\": \"" + *cache_pool_name +
      "\", \"force_nonempty\": \"--force-nonempty\" }",
      inbl, NULL, NULL));
    ASSERT_EQ(0, m_cluster->mon_command(
      "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + base_pool +
      "\", \"overlaypool\": \"" + *cache_pool_name + "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, m_cluster->mon_command(
      "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + *cache_pool_name +
      "\", \"mode\": \"writeback\"}",
      inbl, NULL, NULL));
    // Ensure the tiering changes are visible before the test proceeds.
    m_cluster->wait_for_latest_osdmap();
  }

  // Undo create_cache_pool(): detach the tier, then delete the cache pool.
  // The final pool_delete's return value is intentionally ignored
  // (best-effort cleanup, typically invoked from scope-exit guards).
  void remove_cache_pool(const string &base_pool, const string &cache_pool) {
    bufferlist inbl;
    // tear down tiers
    ASSERT_EQ(0, m_cluster->mon_command(
      "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + base_pool +
      "\"}",
      inbl, NULL, NULL));
    ASSERT_EQ(0, m_cluster->mon_command(
      "{\"prefix\": \"osd tier remove\", \"pool\": \"" + base_pool +
      "\", \"tierpool\": \"" + cache_pool + "\"}",
      inbl, NULL, NULL));
    m_cluster->wait_for_latest_osdmap();
    m_cluster->pool_delete(cache_pool.c_str());
  }

  // Refresh the watcher and assert its discovered pool->peers map matches
  // the fixture's expectation. m_lock guards get_pool_peers() per the
  // ClusterWatcher contract (the same lock was handed to it in SetUp()).
  void check_peers() {
    m_cluster_watcher->refresh_pools();
    std::lock_guard l{m_lock};
    ASSERT_EQ(m_pool_peers, m_cluster_watcher->get_pool_peers());
  }

  RadosRef m_cluster;
  ceph::mutex m_lock = ceph::make_mutex("TestClusterWatcherLock");
  std::unique_ptr<rbd::mirror::ServiceDaemon<>> m_service_daemon;
  std::unique_ptr<ClusterWatcher> m_cluster_watcher;

  // Names of temp pools to delete in the destructor.
  set<string> m_pools;
  // Expected pool-id -> peer set, maintained by create_pool()/delete_pool().
  ClusterWatcher::PoolPeers m_pool_peers;
};
175
// With no pools at all, the watcher must report an empty peer map.
TEST_F(TestClusterWatcher, NoPools) {
  check_peers();
}
179
180 TEST_F(TestClusterWatcher, NoMirroredPools) {
181 check_peers();
182 create_pool(false, PeerSpec());
183 check_peers();
184 create_pool(false, PeerSpec());
185 check_peers();
186 create_pool(false, PeerSpec());
187 check_peers();
188 }
189
190 TEST_F(TestClusterWatcher, ReplicatedPools) {
191 PeerSpec site1("", "site1", "mirror1");
192 PeerSpec site2("", "site2", "mirror2");
193 string first_pool, last_pool;
194 check_peers();
195 create_pool(true, site1, &site1.uuid, &first_pool);
196 check_peers();
197 create_pool(false, PeerSpec());
198 check_peers();
199 create_pool(false, PeerSpec());
200 check_peers();
201 create_pool(false, PeerSpec());
202 check_peers();
203 create_pool(true, site2, &site2.uuid);
204 check_peers();
205 create_pool(true, site2, &site2.uuid);
206 check_peers();
207 create_pool(true, site2, &site2.uuid, &last_pool);
208 check_peers();
209 delete_pool(first_pool, site1);
210 check_peers();
211 delete_pool(last_pool, site2);
212 check_peers();
213 }
214
// Cache-tier pools layered on top of base pools (mirrored or not) must not
// show up as additional entries in the watcher's peer map.
TEST_F(TestClusterWatcher, CachePools) {
  PeerSpec site1("", "site1", "mirror1");
  string base1, base2, cache1, cache2;
  create_pool(true, site1, &site1.uuid, &base1);
  check_peers();

  create_cache_pool(base1, &cache1);
  // Scope guard: detach and delete the cache tier even if an assertion
  // aborts the test body early (ASSERT_* returns from the function).
  BOOST_SCOPE_EXIT( base1, cache1, this_ ) {
    this_->remove_cache_pool(base1, cache1);
  } BOOST_SCOPE_EXIT_END;
  check_peers();

  create_pool(false, PeerSpec(), nullptr, &base2);
  create_cache_pool(base2, &cache2);
  BOOST_SCOPE_EXIT( base2, cache2, this_ ) {
    this_->remove_cache_pool(base2, cache2);
  } BOOST_SCOPE_EXIT_END;
  check_peers();
}
234
235 TEST_F(TestClusterWatcher, ConfigKey) {
236 REQUIRE(!is_librados_test_stub(*m_cluster));
237
238 std::string pool_name;
239 check_peers();
240
241 PeerSpec site1("", "site1", "mirror1");
242 create_pool(true, site1, &site1.uuid, &pool_name);
243 check_peers();
244
245 PeerSpec site2("", "site2", "mirror2");
246 site2.mon_host = "abc";
247 site2.key = "xyz";
248 create_pool(false, site2, &site2.uuid);
249 set_peer_config_key(pool_name, site2);
250
251 check_peers();
252 }
253
254 TEST_F(TestClusterWatcher, SiteName) {
255 REQUIRE(!is_librados_test_stub(*m_cluster));
256
257 std::string site_name;
258 librbd::RBD rbd;
259 ASSERT_EQ(0, rbd.mirror_site_name_get(*m_cluster, &site_name));
260
261 m_cluster_watcher->refresh_pools();
262
263 std::lock_guard l{m_lock};
264 ASSERT_EQ(site_name, m_cluster_watcher->get_site_name());
265 }