]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
4 | #include "common/ceph_argparse.h" | |
5 | #include "common/config.h" | |
6 | #include "common/debug.h" | |
7 | #include "common/errno.h" | |
8 | #include "common/Cond.h" | |
9 | #include "include/rados/librados.hpp" | |
10 | #include "include/rbd/librbd.hpp" | |
11 | #include "global/global_init.h" | |
12 | #include <string> | |
13 | #include <vector> | |
14 | ||
15 | #define dout_context g_ceph_context | |
16 | #define dout_subsys ceph_subsys_rbd_mirror | |
17 | #undef dout_prefix | |
18 | #define dout_prefix *_dout << "random-write: " | |
19 | ||
20 | namespace { | |
21 | ||
// Number of concurrent in-flight writes the tool keeps queued.
const uint32_t NUM_THREADS = 8;
// Largest and smallest write sizes, in KiB (scaled by 1024 where used
// in write_image()).
const uint32_t MAX_IO_SIZE = 24576;
const uint32_t MIN_IO_SIZE = 4;
25 | ||
// Print command-line help for this tool, then append the generic
// server option help via generic_server_usage().
void usage() {
  std::cout << "usage: ceph_test_rbd_mirror_random_write [options...] \\" << std::endl;
  std::cout << "  <pool> <image>" << std::endl;
  std::cout << std::endl;
  std::cout << "  pool image pool" << std::endl;
  std::cout << "  image image to write" << std::endl;
  std::cout << std::endl;
  std::cout << "options:\n";
  std::cout << "  -m monaddress[:port] connect to specified monitor\n";
  std::cout << "  --keyring=<path> path to keyring for local cluster\n";
  std::cout << "  --log-file=<logfile> file to log debug output\n";
  std::cout << "  --debug-rbd-mirror=<log-level>/<memory-level> set rbd-mirror debug level\n";
  generic_server_usage();
}
40 | ||
41 | void rbd_bencher_completion(void *c, void *pc); | |
42 | ||
43 | struct rbd_bencher { | |
44 | librbd::Image *image; | |
9f95a23c TL |
45 | ceph::mutex lock = ceph::make_mutex("rbd_bencher::lock"); |
46 | ceph::condition_variable cond; | |
7c673cae FG |
47 | int in_flight; |
48 | ||
49 | explicit rbd_bencher(librbd::Image *i) | |
50 | : image(i), | |
7c673cae FG |
51 | in_flight(0) { |
52 | } | |
53 | ||
54 | bool start_write(int max, uint64_t off, uint64_t len, bufferlist& bl, | |
55 | int op_flags) { | |
56 | { | |
9f95a23c | 57 | std::lock_guard l{lock}; |
7c673cae FG |
58 | if (in_flight >= max) |
59 | return false; | |
60 | in_flight++; | |
61 | } | |
62 | librbd::RBD::AioCompletion *c = | |
63 | new librbd::RBD::AioCompletion((void *)this, rbd_bencher_completion); | |
64 | image->aio_write2(off, len, bl, c, op_flags); | |
65 | //cout << "start " << c << " at " << off << "~" << len << std::endl; | |
66 | return true; | |
67 | } | |
68 | ||
69 | void wait_for(int max) { | |
20effc67 | 70 | using namespace std::chrono_literals; |
9f95a23c | 71 | std::unique_lock l{lock}; |
7c673cae | 72 | while (in_flight > max) { |
9f95a23c | 73 | cond.wait_for(l, 200ms); |
7c673cae FG |
74 | } |
75 | } | |
76 | ||
77 | }; | |
78 | ||
79 | void rbd_bencher_completion(void *vc, void *pc) { | |
80 | librbd::RBD::AioCompletion *c = (librbd::RBD::AioCompletion *)vc; | |
81 | rbd_bencher *b = static_cast<rbd_bencher *>(pc); | |
82 | //cout << "complete " << c << std::endl; | |
83 | int ret = c->get_return_value(); | |
84 | if (ret != 0) { | |
20effc67 | 85 | std::cout << "write error: " << cpp_strerror(ret) << std::endl; |
7c673cae FG |
86 | exit(ret < 0 ? -ret : ret); |
87 | } | |
9f95a23c | 88 | b->lock.lock(); |
7c673cae | 89 | b->in_flight--; |
9f95a23c TL |
90 | b->cond.notify_all(); |
91 | b->lock.unlock(); | |
7c673cae FG |
92 | c->release(); |
93 | } | |
94 | ||
95 | void write_image(librbd::Image &image) { | |
96 | srand(time(NULL) % (unsigned long) -1); | |
97 | ||
98 | uint64_t max_io_bytes = MAX_IO_SIZE * 1024; | |
99 | bufferptr bp(max_io_bytes); | |
100 | memset(bp.c_str(), rand() & 0xff, bp.length()); | |
101 | bufferlist bl; | |
102 | bl.push_back(bp); | |
103 | ||
104 | uint64_t size = 0; | |
105 | image.size(&size); | |
11fdf7f2 | 106 | ceph_assert(size != 0); |
7c673cae | 107 | |
20effc67 | 108 | std::vector<uint64_t> thread_offset; |
7c673cae FG |
109 | uint64_t i; |
110 | uint64_t start_pos; | |
111 | ||
112 | // disturb all thread's offset, used by seq write | |
113 | for (i = 0; i < NUM_THREADS; i++) { | |
114 | start_pos = (rand() % (size / max_io_bytes)) * max_io_bytes; | |
115 | thread_offset.push_back(start_pos); | |
116 | } | |
117 | ||
118 | uint64_t total_ios = 0; | |
119 | uint64_t total_bytes = 0; | |
120 | rbd_bencher b(&image); | |
121 | while (true) { | |
122 | b.wait_for(NUM_THREADS - 1); | |
123 | for (uint32_t i = 0; i < NUM_THREADS; ++i) { | |
124 | // mostly small writes with a small chance of large writes | |
125 | uint32_t io_modulo = MIN_IO_SIZE + 1; | |
126 | if (rand() % 30 == 0) { | |
127 | io_modulo += MAX_IO_SIZE; | |
128 | } | |
129 | ||
130 | uint32_t io_size = (((rand() % io_modulo) + MIN_IO_SIZE) * 1024); | |
131 | thread_offset[i] = (rand() % (size / io_size)) * io_size; | |
132 | if (!b.start_write(NUM_THREADS, thread_offset[i], io_size, bl, | |
133 | LIBRADOS_OP_FLAG_FADVISE_RANDOM)) { | |
134 | break; | |
135 | } | |
136 | ++i; | |
137 | ||
138 | ++total_ios; | |
139 | total_bytes += io_size; | |
140 | if (total_ios % 100 == 0) { | |
141 | std::cout << total_ios << " IOs, " << total_bytes << " bytes" | |
142 | << std::endl; | |
143 | } | |
144 | } | |
145 | } | |
146 | b.wait_for(0); | |
147 | } | |
148 | ||
149 | } // anonymous namespace | |
150 | ||
151 | int main(int argc, const char **argv) | |
152 | { | |
20effc67 | 153 | auto args = argv_to_vec(argc, argv); |
11fdf7f2 | 154 | if (args.empty()) { |
20effc67 | 155 | std::cerr << argv[0] << ": -h or --help for usage" << std::endl; |
11fdf7f2 TL |
156 | exit(1); |
157 | } | |
158 | if (ceph_argparse_need_usage(args)) { | |
159 | usage(); | |
160 | exit(0); | |
161 | } | |
7c673cae | 162 | |
20effc67 | 163 | auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT, |
f67539c2 TL |
164 | CODE_ENVIRONMENT_UTILITY, |
165 | CINIT_FLAG_NO_MON_CONFIG); | |
7c673cae FG |
166 | |
167 | if (args.size() < 2) { | |
168 | usage(); | |
169 | return EXIT_FAILURE; | |
170 | } | |
171 | ||
172 | std::string pool_name = args[0]; | |
173 | std::string image_name = args[1]; | |
174 | ||
175 | common_init_finish(g_ceph_context); | |
176 | ||
177 | dout(5) << "connecting to cluster" << dendl; | |
178 | librados::Rados rados; | |
179 | librados::IoCtx io_ctx; | |
180 | librbd::RBD rbd; | |
181 | librbd::Image image; | |
182 | int r = rados.init_with_context(g_ceph_context); | |
183 | if (r < 0) { | |
184 | derr << "could not initialize RADOS handle" << dendl; | |
185 | return EXIT_FAILURE; | |
186 | } | |
187 | ||
188 | r = rados.connect(); | |
189 | if (r < 0) { | |
190 | derr << "error connecting to local cluster" << dendl; | |
191 | return EXIT_FAILURE; | |
192 | } | |
193 | ||
194 | r = rados.ioctx_create(pool_name.c_str(), io_ctx); | |
195 | if (r < 0) { | |
196 | derr << "error finding local pool " << pool_name << ": " | |
197 | << cpp_strerror(r) << dendl; | |
198 | return EXIT_FAILURE; | |
199 | } | |
200 | ||
201 | r = rbd.open(io_ctx, image, image_name.c_str()); | |
202 | if (r < 0) { | |
203 | derr << "error opening image " << image_name << ": " | |
204 | << cpp_strerror(r) << dendl; | |
205 | return EXIT_FAILURE; | |
206 | } | |
207 | ||
208 | write_image(image); | |
209 | return EXIT_SUCCESS; | |
210 | } |