// ceph/src/test/librados/test_cxx.cc — librados C++ API test helpers
// (recovered from a git.proxmox.com gitweb extraction)
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

#include "gtest/gtest.h"

#include "common/ceph_context.h"
#include "common/config.h"
#include "include/stringify.h"
19 using namespace librados
;
21 std::string
create_one_pool_pp(const std::string
&pool_name
, Rados
&cluster
)
23 return create_one_pool_pp(pool_name
, cluster
, {});
25 std::string
create_one_pool_pp(const std::string
&pool_name
, Rados
&cluster
,
26 const std::map
<std::string
, std::string
> &config
)
28 std::string err
= connect_cluster_pp(cluster
, config
);
31 int ret
= cluster
.pool_create(pool_name
.c_str());
34 std::ostringstream oss
;
35 oss
<< "cluster.pool_create(" << pool_name
<< ") failed with error " << ret
;
40 ret
= cluster
.ioctx_create(pool_name
.c_str(), ioctx
);
43 std::ostringstream oss
;
44 oss
<< "cluster.ioctx_create(" << pool_name
<< ") failed with error "
48 ioctx
.application_enable("rados", true);
52 int destroy_rule_pp(Rados
&cluster
,
53 const std::string
&rule
,
57 int ret
= cluster
.mon_command("{\"prefix\": \"osd crush rule rm\", \"name\":\"" +
58 rule
+ "\"}", inbl
, NULL
, NULL
);
60 oss
<< "mon_command: osd crush rule rm " + rule
+ " failed with error " << ret
<< std::endl
;
64 int destroy_ec_profile_pp(Rados
&cluster
, const std::string
& pool_name
,
68 int ret
= cluster
.mon_command("{\"prefix\": \"osd erasure-code-profile rm\", \"name\": \"testprofile-" + pool_name
+ "\"}",
71 oss
<< "mon_command: osd erasure-code-profile rm testprofile-" << pool_name
<< " failed with error " << ret
<< std::endl
;
75 int destroy_ec_profile_and_rule_pp(Rados
&cluster
,
76 const std::string
&rule
,
80 ret
= destroy_ec_profile_pp(cluster
, rule
, oss
);
83 return destroy_rule_pp(cluster
, rule
, oss
);
86 std::string
create_one_ec_pool_pp(const std::string
&pool_name
, Rados
&cluster
)
88 std::string err
= connect_cluster_pp(cluster
);
92 std::ostringstream oss
;
93 int ret
= destroy_ec_profile_and_rule_pp(cluster
, pool_name
, oss
);
100 ret
= cluster
.mon_command(
101 "{\"prefix\": \"osd erasure-code-profile set\", \"name\": \"testprofile-" + pool_name
+ "\", \"profile\": [ \"k=2\", \"m=1\", \"crush-failure-domain=osd\"]}",
105 oss
<< "mon_command erasure-code-profile set name:testprofile-" << pool_name
<< " failed with error " << ret
;
109 ret
= cluster
.mon_command(
110 "{\"prefix\": \"osd pool create\", \"pool\": \"" + pool_name
+ "\", \"pool_type\":\"erasure\", \"pg_num\":8, \"pgp_num\":8, \"erasure_code_profile\":\"testprofile-" + pool_name
+ "\"}",
114 destroy_ec_profile_pp(cluster
, pool_name
, oss
);
116 oss
<< "mon_command osd pool create pool:" << pool_name
<< " pool_type:erasure failed with error " << ret
;
120 cluster
.wait_for_latest_osdmap();
124 std::string
connect_cluster_pp(librados::Rados
&cluster
)
126 return connect_cluster_pp(cluster
, {});
129 std::string
connect_cluster_pp(librados::Rados
&cluster
,
130 const std::map
<std::string
, std::string
> &config
)
132 char *id
= getenv("CEPH_CLIENT_ID");
133 if (id
) std::cerr
<< "Client id is: " << id
<< std::endl
;
136 ret
= cluster
.init(id
);
138 std::ostringstream oss
;
139 oss
<< "cluster.init failed with error " << ret
;
142 ret
= cluster
.conf_read_file(NULL
);
145 std::ostringstream oss
;
146 oss
<< "cluster.conf_read_file failed with error " << ret
;
149 cluster
.conf_parse_env(NULL
);
151 for (auto &setting
: config
) {
152 ret
= cluster
.conf_set(setting
.first
.c_str(), setting
.second
.c_str());
154 std::ostringstream oss
;
155 oss
<< "failed to set config value " << setting
.first
<< " to '"
156 << setting
.second
<< "': " << strerror(-ret
);
161 ret
= cluster
.connect();
164 std::ostringstream oss
;
165 oss
<< "cluster.connect failed with error " << ret
;
171 int destroy_one_pool_pp(const std::string
&pool_name
, Rados
&cluster
)
173 int ret
= cluster
.pool_delete(pool_name
.c_str());
182 int destroy_one_ec_pool_pp(const std::string
&pool_name
, Rados
&cluster
)
184 int ret
= cluster
.pool_delete(pool_name
.c_str());
190 CephContext
*cct
= static_cast<CephContext
*>(cluster
.cct());
191 if (!cct
->_conf
->mon_fake_pool_delete
) { // hope this is in [global]
192 std::ostringstream oss
;
193 ret
= destroy_ec_profile_and_rule_pp(cluster
, pool_name
, oss
);
200 cluster
.wait_for_latest_osdmap();