// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

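// ceph_test_rbd_mirror_random_write: drive a continuous random-write
// workload against an existing RBD image (for example, to generate
// journal activity for rbd-mirror to replay). Example invocation,
// where the monitor address, pool, and image names are placeholders:
//
//   ceph_test_rbd_mirror_random_write -m mon-host rbd test-image
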
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "global/global_init.h"
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <string>
#include <vector>

#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rbd_mirror
#undef dout_prefix
#define dout_prefix *_dout << "random-write: "

namespace {

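// I/O sizes are expressed in KiB and scaled by 1024 at the call sites:
// most writes are a few KiB, with an occasional write of roughly
// MAX_IO_SIZE KiB (see the io_modulo logic in write_image()).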
const uint32_t NUM_THREADS = 8;
const uint32_t MAX_IO_SIZE = 24576;
const uint32_t MIN_IO_SIZE = 4;

void usage() {
  std::cout << "usage: ceph_test_rbd_mirror_random_write [options...] \\" << std::endl;
  std::cout << "           <pool> <image>" << std::endl;
  std::cout << std::endl;
  std::cout << "  pool                 image pool" << std::endl;
  std::cout << "  image                image to write" << std::endl;
  std::cout << std::endl;
  std::cout << "options:\n";
  std::cout << "  -m monaddress[:port]  connect to specified monitor\n";
  std::cout << "  --keyring=<path>      path to keyring for local cluster\n";
  std::cout << "  --log-file=<logfile>  file to log debug output\n";
  std::cout << "  --debug-rbd-mirror=<log-level>/<memory-level> set rbd-mirror debug level\n";
  generic_server_usage();
}

void rbd_bencher_completion(void *c, void *pc);

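// Small async write driver: issues AIO writes against the image and
// caps the number of outstanding requests, blocking callers once the
// cap is reached.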
struct rbd_bencher {
  librbd::Image *image;
  Mutex lock;
  Cond cond;
  int in_flight;

  explicit rbd_bencher(librbd::Image *i)
    : image(i),
      lock("rbd_bencher::lock"),
      in_flight(0) {
  }

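  // Start one async write of bl[0, len) at offset off. Returns false
  // without submitting anything if max writes are already in flight.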
  bool start_write(int max, uint64_t off, uint64_t len, bufferlist& bl,
                   int op_flags) {
    {
      Mutex::Locker l(lock);
      if (in_flight >= max)
        return false;
      in_flight++;
    }
    librbd::RBD::AioCompletion *c =
      new librbd::RBD::AioCompletion((void *)this, rbd_bencher_completion);
    image->aio_write2(off, len, bl, c, op_flags);
    //cout << "start " << c << " at " << off << "~" << len << std::endl;
    return true;
  }

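  // Block until at most max writes remain in flight, rechecking the
  // condition every 200 ms.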
  void wait_for(int max) {
    Mutex::Locker l(lock);
    while (in_flight > max) {
      utime_t dur;
      dur.set_from_double(.2);
      cond.WaitInterval(lock, dur);
    }
  }
};

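// AIO completion callback: exits the process on a failed write;
// otherwise decrements the in-flight count and wakes any waiter.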
void rbd_bencher_completion(void *vc, void *pc) {
  librbd::RBD::AioCompletion *c = (librbd::RBD::AioCompletion *)vc;
  rbd_bencher *b = static_cast<rbd_bencher *>(pc);
  //cout << "complete " << c << std::endl;
  int ret = c->get_return_value();
  if (ret != 0) {
    std::cout << "write error: " << cpp_strerror(ret) << std::endl;
    exit(ret < 0 ? -ret : ret);
  }
  b->lock.Lock();
  b->in_flight--;
  b->cond.Signal();
  b->lock.Unlock();
  c->release();
}
96
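// Issue random writes across the whole image, indefinitely: the loop
// below has no exit condition, so the tool runs until it is killed
// (or until a write fails).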
void write_image(librbd::Image &image) {
  srand(time(NULL));

  uint64_t max_io_bytes = MAX_IO_SIZE * 1024;
  bufferptr bp(max_io_bytes);
  memset(bp.c_str(), rand() & 0xff, bp.length());
  bufferlist bl;
  bl.push_back(bp);

  uint64_t size = 0;
  image.size(&size);
  assert(size != 0);

  // scatter each thread's starting offset across the image
  std::vector<uint64_t> thread_offset;
  for (uint64_t i = 0; i < NUM_THREADS; i++) {
    uint64_t start_pos = (rand() % (size / max_io_bytes)) * max_io_bytes;
    thread_offset.push_back(start_pos);
  }

  uint64_t total_ios = 0;
  uint64_t total_bytes = 0;
  rbd_bencher b(&image);
  while (true) {
    b.wait_for(NUM_THREADS - 1);
    for (uint32_t i = 0; i < NUM_THREADS; ++i) {
      // mostly small writes with a small chance of large writes
      uint32_t io_modulo = MIN_IO_SIZE + 1;
      if (rand() % 30 == 0) {
        io_modulo += MAX_IO_SIZE;
      }

      uint32_t io_size = (((rand() % io_modulo) + MIN_IO_SIZE) * 1024);
      thread_offset[i] = (rand() % (size / io_size)) * io_size;
      if (!b.start_write(NUM_THREADS, thread_offset[i], io_size, bl,
                         LIBRADOS_OP_FLAG_FADVISE_RANDOM)) {
        break;
      }

      ++total_ios;
      total_bytes += io_size;
      if (total_ios % 100 == 0) {
        std::cout << total_ios << " IOs, " << total_bytes << " bytes"
                  << std::endl;
      }
    }
  }
  b.wait_for(0);
}

} // anonymous namespace

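// Entry point: parse arguments, connect to the cluster, open the
// target image, and hand it to write_image(), which does not return
// under normal operation.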
int main(int argc, const char **argv)
{
  std::vector<const char*> args;
  argv_to_vec(argc, argv, args);
  env_to_vec(args);

  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);

  for (auto i = args.begin(); i != args.end(); ++i) {
    if (ceph_argparse_flag(args, i, "-h", "--help", (char*)NULL)) {
      usage();
      return EXIT_SUCCESS;
    }
  }

  if (args.size() < 2) {
    usage();
    return EXIT_FAILURE;
  }

  std::string pool_name = args[0];
  std::string image_name = args[1];

  common_init_finish(g_ceph_context);

  dout(5) << "connecting to cluster" << dendl;
  librados::Rados rados;
  librados::IoCtx io_ctx;
  librbd::RBD rbd;
  librbd::Image image;
  int r = rados.init_with_context(g_ceph_context);
  if (r < 0) {
    derr << "could not initialize RADOS handle" << dendl;
    return EXIT_FAILURE;
  }

  r = rados.connect();
  if (r < 0) {
    derr << "error connecting to local cluster" << dendl;
    return EXIT_FAILURE;
  }

  r = rados.ioctx_create(pool_name.c_str(), io_ctx);
  if (r < 0) {
    derr << "error finding local pool " << pool_name << ": "
         << cpp_strerror(r) << dendl;
    return EXIT_FAILURE;
  }

  r = rbd.open(io_ctx, image, image_name.c_str());
  if (r < 0) {
    derr << "error opening image " << image_name << ": "
         << cpp_strerror(r) << dendl;
    return EXIT_FAILURE;
  }

  write_image(image);
  return EXIT_SUCCESS;
}