// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "test_cxx.h"

#include "include/stringify.h"
#include "common/ceph_context.h"
#include "common/config.h"

#include <errno.h>
#include <sstream>
#include <stdlib.h>
#include <string>
#include <string.h>   // strerror()
#include <time.h>
#include <unistd.h>
#include <iostream>
#include "gtest/gtest.h"

using namespace librados;

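// Connect to the cluster (optionally applying the given config overrides),
// create a replicated pool named pool_name, and enable the "rados"
// application on it.  Returns "" on success, or a human-readable error
// message on failure.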
std::string create_one_pool_pp(const std::string &pool_name, Rados &cluster)
{
  return create_one_pool_pp(pool_name, cluster, {});
}
std::string create_one_pool_pp(const std::string &pool_name, Rados &cluster,
                               const std::map<std::string, std::string> &config)
{
  std::string err = connect_cluster_pp(cluster, config);
  if (err.length())
    return err;
  int ret = cluster.pool_create(pool_name.c_str());
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.pool_create(" << pool_name << ") failed with error " << ret;
    return oss.str();
  }

  IoCtx ioctx;
  ret = cluster.ioctx_create(pool_name.c_str(), ioctx);
  if (ret < 0) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.ioctx_create(" << pool_name << ") failed with error "
        << ret;
    return oss.str();
  }
  ioctx.application_enable("rados", true);
  return "";
}

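// Remove the named CRUSH rule via the "osd crush rule rm" mon command.
// Returns the mon_command() return code and logs a message to oss on failure.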
int destroy_rule_pp(Rados &cluster,
                    const std::string &rule,
                    std::ostream &oss)
{
  bufferlist inbl;
  int ret = cluster.mon_command("{\"prefix\": \"osd crush rule rm\", \"name\":\"" +
                                rule + "\"}", inbl, NULL, NULL);
  if (ret)
    oss << "mon_command: osd crush rule rm " + rule + " failed with error " << ret << std::endl;
  return ret;
}

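// Remove the erasure-code profile "testprofile-<pool_name>" that
// create_one_ec_pool_pp() sets up.  Returns the mon_command() return code.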
int destroy_ec_profile_pp(Rados &cluster, const std::string& pool_name,
                          std::ostream &oss)
{
  bufferlist inbl;
  int ret = cluster.mon_command("{\"prefix\": \"osd erasure-code-profile rm\", \"name\": \"testprofile-" + pool_name + "\"}",
                                inbl, NULL, NULL);
  if (ret)
    oss << "mon_command: osd erasure-code-profile rm testprofile-" << pool_name << " failed with error " << ret << std::endl;
  return ret;
}

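// Tear down both pieces of EC pool scaffolding: the "testprofile-<rule>"
// erasure-code profile and the CRUSH rule named after the pool (the rule
// argument is the pool name here).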
int destroy_ec_profile_and_rule_pp(Rados &cluster,
                                   const std::string &rule,
                                   std::ostream &oss)
{
  int ret;
  ret = destroy_ec_profile_pp(cluster, rule, oss);
  if (ret)
    return ret;
  return destroy_rule_pp(cluster, rule, oss);
}

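// Connect to the cluster, (re)create the erasure-code profile
// "testprofile-<pool_name>" (k=2, m=1, crush-failure-domain=osd), and create
// an erasure-coded pool that uses it.  Returns "" on success, or an error
// message on failure.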
std::string create_one_ec_pool_pp(const std::string &pool_name, Rados &cluster)
{
  std::string err = connect_cluster_pp(cluster);
  if (err.length())
    return err;

  std::ostringstream oss;
  int ret = destroy_ec_profile_and_rule_pp(cluster, pool_name, oss);
  if (ret) {
    cluster.shutdown();
    return oss.str();
  }

  bufferlist inbl;
  ret = cluster.mon_command(
    "{\"prefix\": \"osd erasure-code-profile set\", \"name\": \"testprofile-" + pool_name + "\", \"profile\": [ \"k=2\", \"m=1\", \"crush-failure-domain=osd\"]}",
    inbl, NULL, NULL);
  if (ret) {
    cluster.shutdown();
    oss << "mon_command erasure-code-profile set name:testprofile-" << pool_name << " failed with error " << ret;
    return oss.str();
  }

  ret = cluster.mon_command(
    "{\"prefix\": \"osd pool create\", \"pool\": \"" + pool_name + "\", \"pool_type\":\"erasure\", \"pg_num\":8, \"pgp_num\":8, \"erasure_code_profile\":\"testprofile-" + pool_name + "\"}",
    inbl, NULL, NULL);
  if (ret) {
    destroy_ec_profile_pp(cluster, pool_name, oss);
    cluster.shutdown();
    oss << "mon_command osd pool create pool:" << pool_name << " pool_type:erasure failed with error " << ret;
    return oss.str();
  }

  cluster.wait_for_latest_osdmap();
  return "";
}

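// Initialize and connect a Rados handle: honor CEPH_CLIENT_ID if set, read
// the default config file, parse CEPH_ARGS from the environment, apply any
// per-test config overrides, then connect.  Returns "" on success, or an
// error message on failure.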
std::string connect_cluster_pp(librados::Rados &cluster)
{
  return connect_cluster_pp(cluster, {});
}

std::string connect_cluster_pp(librados::Rados &cluster,
                               const std::map<std::string, std::string> &config)
{
  char *id = getenv("CEPH_CLIENT_ID");
  if (id) std::cerr << "Client id is: " << id << std::endl;

  int ret;
  ret = cluster.init(id);
  if (ret) {
    std::ostringstream oss;
    oss << "cluster.init failed with error " << ret;
    return oss.str();
  }
  ret = cluster.conf_read_file(NULL);
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.conf_read_file failed with error " << ret;
    return oss.str();
  }
  cluster.conf_parse_env(NULL);

  for (auto &setting : config) {
    ret = cluster.conf_set(setting.first.c_str(), setting.second.c_str());
    if (ret) {
      std::ostringstream oss;
      oss << "failed to set config value " << setting.first << " to '"
          << setting.second << "': " << strerror(-ret);
      return oss.str();
    }
  }

  ret = cluster.connect();
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.connect failed with error " << ret;
    return oss.str();
  }
  return "";
}

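// Delete the pool and shut the cluster handle down; returns the
// pool_delete() error code, or 0 on success.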
int destroy_one_pool_pp(const std::string &pool_name, Rados &cluster)
{
  int ret = cluster.pool_delete(pool_name.c_str());
  if (ret) {
    cluster.shutdown();
    return ret;
  }
  cluster.shutdown();
  return 0;
}

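// Delete an EC pool and, unless mon_fake_pool_delete is set, also remove the
// erasure-code profile and CRUSH rule created for it, then shut down the
// cluster handle.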
int destroy_one_ec_pool_pp(const std::string &pool_name, Rados &cluster)
{
  int ret = cluster.pool_delete(pool_name.c_str());
  if (ret) {
    cluster.shutdown();
    return ret;
  }

  CephContext *cct = static_cast<CephContext*>(cluster.cct());
  if (!cct->_conf->mon_fake_pool_delete) { // hope this is in [global]
    std::ostringstream oss;
    ret = destroy_ec_profile_and_rule_pp(cluster, pool_name, oss);
    if (ret) {
      cluster.shutdown();
      return ret;
    }
  }

  cluster.wait_for_latest_osdmap();
  cluster.shutdown();
  return ret;
}
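
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the upstream helpers): a typical
// gtest case pairs create_one_pool_pp() with destroy_one_pool_pp().  The
// guard macro, test name, and pool name below are hypothetical; the sketch is
// compiled only if TEST_CXX_USAGE_EXAMPLE is defined.
#ifdef TEST_CXX_USAGE_EXAMPLE
TEST(LibRadosTestCxxExample, CreateUseDestroyPool) {
  librados::Rados cluster;
  const std::string pool_name = "test-cxx-example-pool";
  // Connects and creates a replicated pool; "" signals success.
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  librados::IoCtx ioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
  // ... exercise the pool through ioctx here ...
  ioctx.close();
  // Deletes the pool and shuts the cluster handle down.
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
#endif  // TEST_CXX_USAGE_EXAMPLE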