// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "test/librados_test_stub/TestMemCluster.h"
#include "test/librados_test_stub/TestMemRadosClient.h"

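// TestMemCluster holds the in-memory cluster state shared by all stub
// clients: pools and their objects, registered object handlers,
// watch/notify bookkeeping, the client blocklist, and in-flight
// per-object transactions.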
namespace librados {

TestMemCluster::File::File()
  : objver(0), snap_id(), exists(true) {
}

TestMemCluster::File::File(const File &rhs)
  : data(rhs.data),
    mtime(rhs.mtime),
    objver(rhs.objver),
    snap_id(rhs.snap_id),
    exists(rhs.exists) {
}

TestMemCluster::Pool::Pool() = default;

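// seed the nonce counter from this object's address so clients of distinct
// cluster instances receive distinct nonce ranges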
TestMemCluster::TestMemCluster()
  : m_next_nonce(static_cast<uint32_t>(reinterpret_cast<uint64_t>(this))) {
}

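// drop the cluster's reference on every pool; Pool is reference counted,
// so pools still held elsewhere stay valid until released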
TestMemCluster::~TestMemCluster() {
  for (auto& pool_pair : m_pools) {
    pool_pair.second->put();
  }
}

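// every simulated connection gets its own TestMemRadosClient bound to
// this cluster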
TestRadosClient *TestMemCluster::create_rados_client(CephContext *cct) {
  return new TestMemRadosClient(cct, this);
}

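// attach a handler that will be notified of changes to an existing object;
// registering the same handler twice for one object is a caller bug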
int TestMemCluster::register_object_handler(int64_t pool_id,
                                            const ObjectLocator& locator,
                                            ObjectHandler* object_handler) {
  std::lock_guard locker{m_lock};
  auto pool = get_pool(m_lock, pool_id);
  if (pool == nullptr) {
    return -ENOENT;
  }

  std::unique_lock pool_locker{pool->file_lock};
  auto file_it = pool->files.find(locator);
  if (file_it == pool->files.end()) {
    return -ENOENT;
  }

  auto& object_handlers = pool->file_handlers[locator];
  auto it = object_handlers.find(object_handler);
  ceph_assert(it == object_handlers.end());

  object_handlers.insert(object_handler);
  return 0;
}

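// detach a previously registered handler; unknown pools, objects and
// handlers are silently ignored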
void TestMemCluster::unregister_object_handler(int64_t pool_id,
                                               const ObjectLocator& locator,
                                               ObjectHandler* object_handler) {
  std::lock_guard locker{m_lock};
  auto pool = get_pool(m_lock, pool_id);
  if (pool == nullptr) {
    return;
  }

  std::unique_lock pool_locker{pool->file_lock};
  auto handlers_it = pool->file_handlers.find(locator);
  if (handlers_it == pool->file_handlers.end()) {
    return;
  }

  auto& object_handlers = handlers_it->second;
  object_handlers.erase(object_handler);
}

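// pool ids come from a cluster-wide monotonically increasing counter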
int TestMemCluster::pool_create(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  if (m_pools.find(pool_name) != m_pools.end()) {
    return -EEXIST;
  }
  Pool *pool = new Pool();
  pool->pool_id = ++m_pool_id;
  m_pools[pool_name] = pool;
  return 0;
}

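// deleting a pool only drops the map's reference; clients that still hold
// the pool keep it alive until they release it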
int TestMemCluster::pool_delete(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  auto iter = m_pools.find(pool_name);
  if (iter == m_pools.end()) {
    return -ENOENT;
  }
  iter->second->put();
  m_pools.erase(iter);
  return 0;
}

int TestMemCluster::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
  // TODO: tiering is not modeled in the stub; report each pool as its own
  // base tier
  *base_tier = pool_id;
  return 0;
}

int TestMemCluster::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
  std::lock_guard locker{m_lock};
  v.clear();
  for (auto& pool_pair : m_pools) {
    v.emplace_back(pool_pair.second->pool_id, pool_pair.first);
  }
  return 0;
}

int64_t TestMemCluster::pool_lookup(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  auto iter = m_pools.find(pool_name);
  if (iter == m_pools.end()) {
    return -ENOENT;
  }
  return iter->second->pool_id;
}

int TestMemCluster::pool_reverse_lookup(int64_t id, std::string *name) {
  std::lock_guard locker{m_lock};
  for (auto& pool_pair : m_pools) {
    if (pool_pair.second->pool_id == id) {
      *name = pool_pair.first;
      return 0;
    }
  }
  return -ENOENT;
}

TestMemCluster::Pool *TestMemCluster::get_pool(int64_t pool_id) {
  std::lock_guard locker{m_lock};
  return get_pool(m_lock, pool_id);
}

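// the mutex parameter is unused at runtime; it documents that the caller
// must already hold m_lock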
TestMemCluster::Pool *TestMemCluster::get_pool(const ceph::mutex& lock,
                                               int64_t pool_id) {
  for (auto &pool_pair : m_pools) {
    if (pool_pair.second->pool_id == pool_id) {
      return pool_pair.second;
    }
  }
  return nullptr;
}

TestMemCluster::Pool *TestMemCluster::get_pool(const std::string &pool_name) {
  std::lock_guard locker{m_lock};
  auto iter = m_pools.find(pool_name);
  if (iter != m_pools.end()) {
    return iter->second;
  }
  return nullptr;
}

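// hand out a unique (nonce, global id) pair for each new client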
void TestMemCluster::allocate_client(uint32_t *nonce, uint64_t *global_id) {
  std::lock_guard locker{m_lock};
  *nonce = m_next_nonce++;
  *global_id = m_next_global_id++;
}

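// releasing a client also clears any blocklist entry recorded against its
// nonce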
void TestMemCluster::deallocate_client(uint32_t nonce) {
  std::lock_guard locker{m_lock};
  m_blocklist.erase(nonce);
}

bool TestMemCluster::is_blocklisted(uint32_t nonce) const {
  std::lock_guard locker{m_lock};
  return (m_blocklist.find(nonce) != m_blocklist.end());
}

void TestMemCluster::blocklist(uint32_t nonce) {
  {
    std::lock_guard locker{m_lock};
    m_blocklist.insert(nonce);
  }

  // after blocklisting the client, disconnect and drop its watches
  m_watch_notify.blocklist(nonce);
}

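// transactions give per-object exclusion: wait until no other transaction
// is in flight for the locator, then record our own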
void TestMemCluster::transaction_start(const ObjectLocator& locator) {
  std::unique_lock locker{m_lock};
  m_transaction_cond.wait(locker, [&locator, this] {
      return m_transactions.count(locator) == 0;
    });
  auto result = m_transactions.insert(locator);
  ceph_assert(result.second);
}

void TestMemCluster::transaction_finish(const ObjectLocator& locator) {
  std::lock_guard locker{m_lock};
  size_t count = m_transactions.erase(locator);
  ceph_assert(count == 1);
  m_transaction_cond.notify_all();
}

} // namespace librados