1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
10 #ifndef OS_WIN
11 #include <sys/ioctl.h>
12 #endif
13
14 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
15 #ifdef OS_FREEBSD
16 #include <malloc_np.h>
17 #else
18 #include <malloc.h>
19 #endif
20 #endif
21 #include <sys/types.h>
22
23 #include <iostream>
24 #include <unordered_set>
25 #include <atomic>
26 #include <list>
27
28 #ifdef OS_LINUX
29 #include <fcntl.h>
30 #include <linux/fs.h>
31 #include <stdlib.h>
32 #include <sys/stat.h>
33 #include <unistd.h>
34 #endif
35
36 #ifdef ROCKSDB_FALLOCATE_PRESENT
37 #include <errno.h>
38 #endif
39
40 #include "env/env_chroot.h"
41 #include "port/port.h"
42 #include "rocksdb/env.h"
43 #include "util/coding.h"
44 #include "util/log_buffer.h"
45 #include "util/mutexlock.h"
46 #include "util/string_util.h"
47 #include "util/sync_point.h"
48 #include "util/testharness.h"
49 #include "util/testutil.h"
50
51 #ifdef OS_LINUX
52 static const size_t kPageSize = sysconf(_SC_PAGESIZE);
53 #else
54 static const size_t kPageSize = 4 * 1024;
55 #endif
56
57 namespace rocksdb {
58
59 static const int kDelayMicros = 100000;
60
61 struct Deleter {
62 explicit Deleter(void (*fn)(void*)) : fn_(fn) {}
63
64 void operator()(void* ptr) {
65 assert(fn_);
66 assert(ptr);
67 (*fn_)(ptr);
68 }
69
70 void (*fn_)(void*);
71 };
72
73 std::unique_ptr<char, Deleter> NewAligned(const size_t size, const char ch) {
74 char* ptr = nullptr;
75 #ifdef OS_WIN
76 if (nullptr == (ptr = reinterpret_cast<char*>(_aligned_malloc(size, kPageSize)))) {
77 return std::unique_ptr<char, Deleter>(nullptr, Deleter(_aligned_free));
78 }
79 std::unique_ptr<char, Deleter> uptr(ptr, Deleter(_aligned_free));
80 #else
81 if (posix_memalign(reinterpret_cast<void**>(&ptr), kPageSize, size) != 0) {
82 return std::unique_ptr<char, Deleter>(nullptr, Deleter(free));
83 }
84 std::unique_ptr<char, Deleter> uptr(ptr, Deleter(free));
85 #endif
86 memset(uptr.get(), ch, size);
87 return uptr;
88 }
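// Usage sketch, as in the direct-I/O tests below: NewAligned returns a
// page-aligned buffer whose custom Deleter invokes the matching
// free()/_aligned_free() when the unique_ptr goes out of scope, e.g.
//   auto data = NewAligned(kSectorSize, 'x');
//   Slice slice(data.get(), kSectorSize);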
89
90 class EnvPosixTest : public testing::Test {
91 private:
92 port::Mutex mu_;
93 std::string events_;
94
95 public:
96 Env* env_;
97 bool direct_io_;
98 EnvPosixTest() : env_(Env::Default()), direct_io_(false) {}
99 };
100
101 class EnvPosixTestWithParam
102 : public EnvPosixTest,
103 public ::testing::WithParamInterface<std::pair<Env*, bool>> {
104 public:
105 EnvPosixTestWithParam() {
106 std::pair<Env*, bool> param_pair = GetParam();
107 env_ = param_pair.first;
108 direct_io_ = param_pair.second;
109 }
110
111 void WaitThreadPoolsEmpty() {
112 // Wait until the thread pools are empty.
113 while (env_->GetThreadPoolQueueLen(Env::Priority::LOW) != 0) {
114 Env::Default()->SleepForMicroseconds(kDelayMicros);
115 }
116 while (env_->GetThreadPoolQueueLen(Env::Priority::HIGH) != 0) {
117 Env::Default()->SleepForMicroseconds(kDelayMicros);
118 }
119 }
120
121 ~EnvPosixTestWithParam() override { WaitThreadPoolsEmpty(); }
122 };
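// The (Env*, direct_io) parameter pairs are supplied by the
// INSTANTIATE_TEST_CASE_P calls at the bottom of this file: the default Env
// with and without direct I/O, plus a chroot Env on non-Windows, non-LITE
// builds.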
123
124 static void SetBool(void* ptr) {
125 reinterpret_cast<std::atomic<bool>*>(ptr)->store(true);
126 }
127
128 TEST_F(EnvPosixTest, DISABLED_RunImmediately) {
129 for (int pri = Env::BOTTOM; pri < Env::TOTAL; ++pri) {
130 std::atomic<bool> called(false);
131 env_->SetBackgroundThreads(1, static_cast<Env::Priority>(pri));
132 env_->Schedule(&SetBool, &called, static_cast<Env::Priority>(pri));
133 Env::Default()->SleepForMicroseconds(kDelayMicros);
134 ASSERT_TRUE(called.load());
135 }
136 }
137
138 TEST_F(EnvPosixTest, RunEventually) {
139 std::atomic<bool> called(false);
140 env_->StartThread(&SetBool, &called);
141 env_->WaitForJoin();
142 ASSERT_TRUE(called.load());
143 }
144
145 #ifdef OS_WIN
146 TEST_F(EnvPosixTest, AreFilesSame) {
147 {
148 bool tmp;
149 if (env_->AreFilesSame("", "", &tmp).IsNotSupported()) {
150 fprintf(stderr,
151 "skipping EnvBasicTestWithParam.AreFilesSame due to "
152 "unsupported Env::AreFilesSame\n");
153 return;
154 }
155 }
156
157 const EnvOptions soptions;
158 auto* env = Env::Default();
159 std::string same_file_name = test::PerThreadDBPath(env, "same_file");
160 std::string same_file_link_name = same_file_name + "_link";
161
162 std::unique_ptr<WritableFile> same_file;
163 ASSERT_OK(env->NewWritableFile(same_file_name,
164 &same_file, soptions));
165   ASSERT_OK(same_file->Append("random_data"));
166 ASSERT_OK(same_file->Flush());
167 same_file.reset();
168
169 ASSERT_OK(env->LinkFile(same_file_name, same_file_link_name));
170 bool result = false;
171 ASSERT_OK(env->AreFilesSame(same_file_name, same_file_link_name, &result));
172 ASSERT_TRUE(result);
173 }
174 #endif
175
176 #ifdef OS_LINUX
177 TEST_F(EnvPosixTest, DISABLED_FilePermission) {
178 // Only works for Linux environment
179 if (env_ == Env::Default()) {
180 EnvOptions soptions;
181 std::vector<std::string> fileNames{
182 test::PerThreadDBPath(env_, "testfile"),
183 test::PerThreadDBPath(env_, "testfile1")};
184 std::unique_ptr<WritableFile> wfile;
185 ASSERT_OK(env_->NewWritableFile(fileNames[0], &wfile, soptions));
186 ASSERT_OK(env_->NewWritableFile(fileNames[1], &wfile, soptions));
187 wfile.reset();
188 std::unique_ptr<RandomRWFile> rwfile;
189 ASSERT_OK(env_->NewRandomRWFile(fileNames[1], &rwfile, soptions));
190
191 struct stat sb;
192 for (const auto& filename : fileNames) {
193 if (::stat(filename.c_str(), &sb) == 0) {
194 ASSERT_EQ(sb.st_mode & 0777, 0644);
195 }
196 env_->DeleteFile(filename);
197 }
198
199 env_->SetAllowNonOwnerAccess(false);
200 ASSERT_OK(env_->NewWritableFile(fileNames[0], &wfile, soptions));
201 ASSERT_OK(env_->NewWritableFile(fileNames[1], &wfile, soptions));
202 wfile.reset();
203 ASSERT_OK(env_->NewRandomRWFile(fileNames[1], &rwfile, soptions));
204
205 for (const auto& filename : fileNames) {
206 if (::stat(filename.c_str(), &sb) == 0) {
207 ASSERT_EQ(sb.st_mode & 0777, 0600);
208 }
209 env_->DeleteFile(filename);
210 }
211 }
212 }
213 #endif
214
215 TEST_F(EnvPosixTest, MemoryMappedFileBuffer) {
216 const int kFileBytes = 1 << 15; // 32 KB
217 std::string expected_data;
218 std::string fname = test::PerThreadDBPath(env_, "testfile");
219 {
220 std::unique_ptr<WritableFile> wfile;
221 const EnvOptions soptions;
222 ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
223
224 Random rnd(301);
225 test::RandomString(&rnd, kFileBytes, &expected_data);
226 ASSERT_OK(wfile->Append(expected_data));
227 }
228
229 std::unique_ptr<MemoryMappedFileBuffer> mmap_buffer;
230 Status status = env_->NewMemoryMappedFileBuffer(fname, &mmap_buffer);
231   // It should be supported at least on Linux.
232 #if !defined(OS_LINUX)
233 if (status.IsNotSupported()) {
234 fprintf(stderr,
235 "skipping EnvPosixTest.MemoryMappedFileBuffer due to "
236 "unsupported Env::NewMemoryMappedFileBuffer\n");
237 return;
238 }
239 #endif // !defined(OS_LINUX)
240
241 ASSERT_OK(status);
242 ASSERT_NE(nullptr, mmap_buffer.get());
243 ASSERT_NE(nullptr, mmap_buffer->GetBase());
244 ASSERT_EQ(kFileBytes, mmap_buffer->GetLen());
245 std::string actual_data(reinterpret_cast<const char*>(mmap_buffer->GetBase()),
246 mmap_buffer->GetLen());
247 ASSERT_EQ(expected_data, actual_data);
248 }
249
250 TEST_P(EnvPosixTestWithParam, UnSchedule) {
251 std::atomic<bool> called(false);
252 env_->SetBackgroundThreads(1, Env::LOW);
253
254 /* Block the low priority queue */
255 test::SleepingBackgroundTask sleeping_task, sleeping_task1;
256 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
257 Env::Priority::LOW);
258
259 /* Schedule another task */
260 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task1,
261 Env::Priority::LOW, &sleeping_task1);
262
263 /* Remove it with a different tag */
264 ASSERT_EQ(0, env_->UnSchedule(&called, Env::Priority::LOW));
265
266 /* Remove it from the queue with the right tag */
267 ASSERT_EQ(1, env_->UnSchedule(&sleeping_task1, Env::Priority::LOW));
268
269 // Unblock background thread
270 sleeping_task.WakeUp();
271
272 /* Schedule another task */
273 env_->Schedule(&SetBool, &called);
274 for (int i = 0; i < kDelayMicros; i++) {
275 if (called.load()) {
276 break;
277 }
278 Env::Default()->SleepForMicroseconds(1);
279 }
280 ASSERT_TRUE(called.load());
281
282 ASSERT_TRUE(!sleeping_task.IsSleeping() && !sleeping_task1.IsSleeping());
283 WaitThreadPoolsEmpty();
284 }
285
286 // This test assumes that the last scheduled
287 // task will run last. In fact, nothing may actually
288 // run in the allotted sleeping time, or the tasks may
289 // run in any order. The purpose of the test is unclear.
290 #ifndef OS_WIN
291 TEST_P(EnvPosixTestWithParam, RunMany) {
292 std::atomic<int> last_id(0);
293
294 struct CB {
295 std::atomic<int>* last_id_ptr; // Pointer to shared slot
296 int id; // Order# for the execution of this callback
297
298 CB(std::atomic<int>* p, int i) : last_id_ptr(p), id(i) {}
299
300 static void Run(void* v) {
301 CB* cb = reinterpret_cast<CB*>(v);
302 int cur = cb->last_id_ptr->load();
303 ASSERT_EQ(cb->id - 1, cur);
304 cb->last_id_ptr->store(cb->id);
305 }
306 };
307
308 // Schedule in different order than start time
309 CB cb1(&last_id, 1);
310 CB cb2(&last_id, 2);
311 CB cb3(&last_id, 3);
312 CB cb4(&last_id, 4);
313 env_->Schedule(&CB::Run, &cb1);
314 env_->Schedule(&CB::Run, &cb2);
315 env_->Schedule(&CB::Run, &cb3);
316 env_->Schedule(&CB::Run, &cb4);
317
318 Env::Default()->SleepForMicroseconds(kDelayMicros);
319 int cur = last_id.load(std::memory_order_acquire);
320 ASSERT_EQ(4, cur);
321 WaitThreadPoolsEmpty();
322 }
323 #endif
324
325 struct State {
326 port::Mutex mu;
327 int val;
328 int num_running;
329 };
330
331 static void ThreadBody(void* arg) {
332 State* s = reinterpret_cast<State*>(arg);
333 s->mu.Lock();
334 s->val += 1;
335 s->num_running -= 1;
336 s->mu.Unlock();
337 }
338
339 TEST_P(EnvPosixTestWithParam, StartThread) {
340 State state;
341 state.val = 0;
342 state.num_running = 3;
343 for (int i = 0; i < 3; i++) {
344 env_->StartThread(&ThreadBody, &state);
345 }
346 while (true) {
347 state.mu.Lock();
348 int num = state.num_running;
349 state.mu.Unlock();
350 if (num == 0) {
351 break;
352 }
353 Env::Default()->SleepForMicroseconds(kDelayMicros);
354 }
355 ASSERT_EQ(state.val, 3);
356 WaitThreadPoolsEmpty();
357 }
358
359 TEST_P(EnvPosixTestWithParam, TwoPools) {
360 // Data structures to signal tasks to run.
361 port::Mutex mutex;
362 port::CondVar cv(&mutex);
363 bool should_start = false;
364
365 class CB {
366 public:
367 CB(const std::string& pool_name, int pool_size, port::Mutex* trigger_mu,
368 port::CondVar* trigger_cv, bool* _should_start)
369 : mu_(),
370 num_running_(0),
371 num_finished_(0),
372 pool_size_(pool_size),
373 pool_name_(pool_name),
374 trigger_mu_(trigger_mu),
375 trigger_cv_(trigger_cv),
376 should_start_(_should_start) {}
377
378 static void Run(void* v) {
379 CB* cb = reinterpret_cast<CB*>(v);
380 cb->Run();
381 }
382
383 void Run() {
384 {
385 MutexLock l(&mu_);
386 num_running_++;
387 // make sure we don't have more than pool_size_ jobs running.
388 ASSERT_LE(num_running_, pool_size_.load());
389 }
390
391 {
392 MutexLock l(trigger_mu_);
393 while (!(*should_start_)) {
394 trigger_cv_->Wait();
395 }
396 }
397
398 {
399 MutexLock l(&mu_);
400 num_running_--;
401 num_finished_++;
402 }
403 }
404
405 int NumFinished() {
406 MutexLock l(&mu_);
407 return num_finished_;
408 }
409
410 void Reset(int pool_size) {
411 pool_size_.store(pool_size);
412 num_finished_ = 0;
413 }
414
415 private:
416 port::Mutex mu_;
417 int num_running_;
418 int num_finished_;
419 std::atomic<int> pool_size_;
420 std::string pool_name_;
421 port::Mutex* trigger_mu_;
422 port::CondVar* trigger_cv_;
423 bool* should_start_;
424 };
425
426 const int kLowPoolSize = 2;
427 const int kHighPoolSize = 4;
428 const int kJobs = 8;
429
430 CB low_pool_job("low", kLowPoolSize, &mutex, &cv, &should_start);
431 CB high_pool_job("high", kHighPoolSize, &mutex, &cv, &should_start);
432
433 env_->SetBackgroundThreads(kLowPoolSize);
434 env_->SetBackgroundThreads(kHighPoolSize, Env::Priority::HIGH);
435
436 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
437 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
438
439 // schedule same number of jobs in each pool
440 for (int i = 0; i < kJobs; i++) {
441 env_->Schedule(&CB::Run, &low_pool_job);
442 env_->Schedule(&CB::Run, &high_pool_job, Env::Priority::HIGH);
443 }
444 // Wait a short while for the jobs to be dispatched.
445 int sleep_count = 0;
446 while ((unsigned int)(kJobs - kLowPoolSize) !=
447 env_->GetThreadPoolQueueLen(Env::Priority::LOW) ||
448 (unsigned int)(kJobs - kHighPoolSize) !=
449 env_->GetThreadPoolQueueLen(Env::Priority::HIGH)) {
450 env_->SleepForMicroseconds(kDelayMicros);
451 if (++sleep_count > 100) {
452 break;
453 }
454 }
455
456 ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
457 env_->GetThreadPoolQueueLen());
458 ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
459 env_->GetThreadPoolQueueLen(Env::Priority::LOW));
460 ASSERT_EQ((unsigned int)(kJobs - kHighPoolSize),
461 env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
462
463 // Trigger jobs to run.
464 {
465 MutexLock l(&mutex);
466 should_start = true;
467 cv.SignalAll();
468 }
469
470 // wait for all jobs to finish
471 while (low_pool_job.NumFinished() < kJobs ||
472 high_pool_job.NumFinished() < kJobs) {
473 env_->SleepForMicroseconds(kDelayMicros);
474 }
475
476 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
477 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
478
479   // Hold the next batch of jobs until triggered.
480 should_start = false;
481
482   // Call IncBackgroundThreadsIfNeeded on two pools: one increasing and
483   // the other decreasing.
484 env_->IncBackgroundThreadsIfNeeded(kLowPoolSize - 1, Env::Priority::LOW);
485 env_->IncBackgroundThreadsIfNeeded(kHighPoolSize + 1, Env::Priority::HIGH);
486 high_pool_job.Reset(kHighPoolSize + 1);
487 low_pool_job.Reset(kLowPoolSize);
488
489 // schedule same number of jobs in each pool
490 for (int i = 0; i < kJobs; i++) {
491 env_->Schedule(&CB::Run, &low_pool_job);
492 env_->Schedule(&CB::Run, &high_pool_job, Env::Priority::HIGH);
493 }
494 // Wait a short while for the jobs to be dispatched.
495 sleep_count = 0;
496 while ((unsigned int)(kJobs - kLowPoolSize) !=
497 env_->GetThreadPoolQueueLen(Env::Priority::LOW) ||
498 (unsigned int)(kJobs - (kHighPoolSize + 1)) !=
499 env_->GetThreadPoolQueueLen(Env::Priority::HIGH)) {
500 env_->SleepForMicroseconds(kDelayMicros);
501 if (++sleep_count > 100) {
502 break;
503 }
504 }
505 ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
506 env_->GetThreadPoolQueueLen());
507 ASSERT_EQ((unsigned int)(kJobs - kLowPoolSize),
508 env_->GetThreadPoolQueueLen(Env::Priority::LOW));
509 ASSERT_EQ((unsigned int)(kJobs - (kHighPoolSize + 1)),
510 env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
511
512 // Trigger jobs to run.
513 {
514 MutexLock l(&mutex);
515 should_start = true;
516 cv.SignalAll();
517 }
518
519 // wait for all jobs to finish
520 while (low_pool_job.NumFinished() < kJobs ||
521 high_pool_job.NumFinished() < kJobs) {
522 env_->SleepForMicroseconds(kDelayMicros);
523 }
524
525 env_->SetBackgroundThreads(kHighPoolSize, Env::Priority::HIGH);
526 WaitThreadPoolsEmpty();
527 }
528
529 TEST_P(EnvPosixTestWithParam, DecreaseNumBgThreads) {
530 std::vector<test::SleepingBackgroundTask> tasks(10);
531
532   // Set the number of threads to 1 first.
533 env_->SetBackgroundThreads(1, Env::Priority::HIGH);
534 Env::Default()->SleepForMicroseconds(kDelayMicros);
535
536   // Schedule 3 tasks. Task 0 running; tasks 1, 2 waiting.
537 for (size_t i = 0; i < 3; i++) {
538 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[i],
539 Env::Priority::HIGH);
540 Env::Default()->SleepForMicroseconds(kDelayMicros);
541 }
542 ASSERT_EQ(2U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
543 ASSERT_TRUE(tasks[0].IsSleeping());
544 ASSERT_TRUE(!tasks[1].IsSleeping());
545 ASSERT_TRUE(!tasks[2].IsSleeping());
546
547 // Increase to 2 threads. Task 0, 1 running; 2 waiting
548 env_->SetBackgroundThreads(2, Env::Priority::HIGH);
549 Env::Default()->SleepForMicroseconds(kDelayMicros);
550 ASSERT_EQ(1U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
551 ASSERT_TRUE(tasks[0].IsSleeping());
552 ASSERT_TRUE(tasks[1].IsSleeping());
553 ASSERT_TRUE(!tasks[2].IsSleeping());
554
555 // Shrink back to 1 thread. Still task 0, 1 running, 2 waiting
556 env_->SetBackgroundThreads(1, Env::Priority::HIGH);
557 Env::Default()->SleepForMicroseconds(kDelayMicros);
558 ASSERT_EQ(1U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
559 ASSERT_TRUE(tasks[0].IsSleeping());
560 ASSERT_TRUE(tasks[1].IsSleeping());
561 ASSERT_TRUE(!tasks[2].IsSleeping());
562
563   // Wake up task 1 so it finishes. Task 0 running, task 2 still waiting.
564 tasks[1].WakeUp();
565 Env::Default()->SleepForMicroseconds(kDelayMicros);
566 ASSERT_EQ(1U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
567 ASSERT_TRUE(tasks[0].IsSleeping());
568 ASSERT_TRUE(!tasks[1].IsSleeping());
569 ASSERT_TRUE(!tasks[2].IsSleeping());
570
571 // Increase to 5 threads. Task 0 and 2 running.
572 env_->SetBackgroundThreads(5, Env::Priority::HIGH);
573 Env::Default()->SleepForMicroseconds(kDelayMicros);
574 ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
575 ASSERT_TRUE(tasks[0].IsSleeping());
576 ASSERT_TRUE(tasks[2].IsSleeping());
577
578   // Change the number of threads a couple of times while there are not
579   // enough tasks.
580 env_->SetBackgroundThreads(7, Env::Priority::HIGH);
581 Env::Default()->SleepForMicroseconds(kDelayMicros);
582 tasks[2].WakeUp();
583 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
584 env_->SetBackgroundThreads(3, Env::Priority::HIGH);
585 Env::Default()->SleepForMicroseconds(kDelayMicros);
586 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
587 env_->SetBackgroundThreads(4, Env::Priority::HIGH);
588 Env::Default()->SleepForMicroseconds(kDelayMicros);
589 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
590 env_->SetBackgroundThreads(5, Env::Priority::HIGH);
591 Env::Default()->SleepForMicroseconds(kDelayMicros);
592 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
593 env_->SetBackgroundThreads(4, Env::Priority::HIGH);
594 Env::Default()->SleepForMicroseconds(kDelayMicros);
595 ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
596
597 Env::Default()->SleepForMicroseconds(kDelayMicros * 50);
598
599   // Enqueue 5 more tasks. The thread pool size is now 4.
600   // Tasks 0, 3, 4, 5 running; 6, 7 waiting.
601 for (size_t i = 3; i < 8; i++) {
602 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[i],
603 Env::Priority::HIGH);
604 }
605 Env::Default()->SleepForMicroseconds(kDelayMicros);
606 ASSERT_EQ(2U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
607 ASSERT_TRUE(tasks[3].IsSleeping());
608 ASSERT_TRUE(tasks[4].IsSleeping());
609 ASSERT_TRUE(tasks[5].IsSleeping());
610 ASSERT_TRUE(!tasks[6].IsSleeping());
611 ASSERT_TRUE(!tasks[7].IsSleeping());
612
613   // Wake up tasks 0, 3 and 4. Tasks 5, 6, 7 running.
614 tasks[0].WakeUp();
615 tasks[3].WakeUp();
616 tasks[4].WakeUp();
617
618 Env::Default()->SleepForMicroseconds(kDelayMicros);
619 ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
620 for (size_t i = 5; i < 8; i++) {
621 ASSERT_TRUE(tasks[i].IsSleeping());
622 }
623
624 // Shrink back to 1 thread. Still task 5, 6, 7 running
625 env_->SetBackgroundThreads(1, Env::Priority::HIGH);
626 Env::Default()->SleepForMicroseconds(kDelayMicros);
627 ASSERT_TRUE(tasks[5].IsSleeping());
628 ASSERT_TRUE(tasks[6].IsSleeping());
629 ASSERT_TRUE(tasks[7].IsSleeping());
630
631 // Wake up task 6. Task 5, 7 running
632 tasks[6].WakeUp();
633 Env::Default()->SleepForMicroseconds(kDelayMicros);
634 ASSERT_TRUE(tasks[5].IsSleeping());
635 ASSERT_TRUE(!tasks[6].IsSleeping());
636 ASSERT_TRUE(tasks[7].IsSleeping());
637
638   // Wake up task 7. Task 5 running.
639 tasks[7].WakeUp();
640 Env::Default()->SleepForMicroseconds(kDelayMicros);
641 ASSERT_TRUE(!tasks[7].IsSleeping());
642
643   // Enqueue tasks 8 and 9. Task 5 running; one of 8, 9 might be running.
644 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[8],
645 Env::Priority::HIGH);
646 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &tasks[9],
647 Env::Priority::HIGH);
648 Env::Default()->SleepForMicroseconds(kDelayMicros);
649 ASSERT_GT(env_->GetThreadPoolQueueLen(Env::Priority::HIGH), (unsigned int)0);
650 ASSERT_TRUE(!tasks[8].IsSleeping() || !tasks[9].IsSleeping());
651
652 // Increase to 4 threads. Task 5, 8, 9 running.
653 env_->SetBackgroundThreads(4, Env::Priority::HIGH);
654 Env::Default()->SleepForMicroseconds(kDelayMicros);
655 ASSERT_EQ((unsigned int)0, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
656 ASSERT_TRUE(tasks[8].IsSleeping());
657 ASSERT_TRUE(tasks[9].IsSleeping());
658
659 // Shrink to 1 thread
660 env_->SetBackgroundThreads(1, Env::Priority::HIGH);
661
662   // Wake up task 9.
663 tasks[9].WakeUp();
664 Env::Default()->SleepForMicroseconds(kDelayMicros);
665 ASSERT_TRUE(!tasks[9].IsSleeping());
666 ASSERT_TRUE(tasks[8].IsSleeping());
667
668   // Wake up task 8.
669 tasks[8].WakeUp();
670 Env::Default()->SleepForMicroseconds(kDelayMicros);
671 ASSERT_TRUE(!tasks[8].IsSleeping());
672
673   // Wake up the last task (task 5).
674 tasks[5].WakeUp();
675
676 Env::Default()->SleepForMicroseconds(kDelayMicros);
677 ASSERT_TRUE(!tasks[5].IsSleeping());
678 WaitThreadPoolsEmpty();
679 }
680
681 #if (defined OS_LINUX || defined OS_WIN)
682 // Travis doesn't support fallocate or getting unique ID from files for whatever
683 // reason.
684 #ifndef TRAVIS
685
686 namespace {
687 bool IsSingleVarint(const std::string& s) {
688 Slice slice(s);
689
690 uint64_t v;
691 if (!GetVarint64(&slice, &v)) {
692 return false;
693 }
694
695 return slice.size() == 0;
696 }
697
698 bool IsUniqueIDValid(const std::string& s) {
699 return !s.empty() && !IsSingleVarint(s);
700 }
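// A valid unique ID is expected to contain more than one varint (the POSIX
// implementation, for example, typically packs the device number, inode
// number and file version), so a buffer that parses as exactly one varint
// with nothing left over is treated as invalid.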
701
702 const size_t MAX_ID_SIZE = 100;
703 char temp_id[MAX_ID_SIZE];
704
705
706 } // namespace
707
708 // Determine whether we can use the FS_IOC_GETVERSION ioctl
709 // on a file in directory DIR. Create a temporary file therein,
710 // try to apply the ioctl (save that result), clean up, and
711 // return the result. Return true if it is supported, and
712 // false if anything fails.
713 // Note that this function "knows" that dir has just been created
714 // and is empty, so we create a simply-named test file: "f".
715 bool ioctl_support__FS_IOC_GETVERSION(const std::string& dir) {
716 #ifdef OS_WIN
717 return true;
718 #else
719 const std::string file = dir + "/f";
720 int fd;
721 do {
722 fd = open(file.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
723 } while (fd < 0 && errno == EINTR);
724 long int version;
725 bool ok = (fd >= 0 && ioctl(fd, FS_IOC_GETVERSION, &version) >= 0);
726
727 close(fd);
728 unlink(file.c_str());
729
730 return ok;
731 #endif
732 }
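// Note (informational): FS_IOC_GETVERSION support depends on the underlying
// filesystem; in practice ext2/3/4 support it while tmpfs generally does not,
// which is why the probe above is needed before the unique-ID tests.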
733
734 // To ensure that Env::GetUniqueId-related tests work correctly, the files
735 // should be stored in regular storage like "hard disk" or "flash device",
736 // and not on a tmpfs file system (like /dev/shm and /tmp on some systems).
737 // Otherwise we cannot get the correct id.
738 //
739 // This function serves as the replacement for test::TmpDir(), which may be
740 // customized to be on a file system that doesn't work with GetUniqueId().
741
742 class IoctlFriendlyTmpdir {
743 public:
744 explicit IoctlFriendlyTmpdir() {
745 char dir_buf[100];
746
747 const char *fmt = "%s/rocksdb.XXXXXX";
748 const char *tmp = getenv("TEST_IOCTL_FRIENDLY_TMPDIR");
749
750 #ifdef OS_WIN
751 #define rmdir _rmdir
752 if(tmp == nullptr) {
753 tmp = getenv("TMP");
754 }
755
756 snprintf(dir_buf, sizeof dir_buf, fmt, tmp);
757 auto result = _mktemp(dir_buf);
758 assert(result != nullptr);
759 BOOL ret = CreateDirectory(dir_buf, NULL);
760 assert(ret == TRUE);
761 dir_ = dir_buf;
762 #else
763 std::list<std::string> candidate_dir_list = {"/var/tmp", "/tmp"};
764
765 // If $TEST_IOCTL_FRIENDLY_TMPDIR/rocksdb.XXXXXX fits, use
766 // $TEST_IOCTL_FRIENDLY_TMPDIR; subtract 2 for the "%s", and
767 // add 1 for the trailing NUL byte.
768 if (tmp && strlen(tmp) + strlen(fmt) - 2 + 1 <= sizeof dir_buf) {
769 // use $TEST_IOCTL_FRIENDLY_TMPDIR value
770 candidate_dir_list.push_front(tmp);
771 }
772
773 for (const std::string& d : candidate_dir_list) {
774 snprintf(dir_buf, sizeof dir_buf, fmt, d.c_str());
775 if (mkdtemp(dir_buf)) {
776 if (ioctl_support__FS_IOC_GETVERSION(dir_buf)) {
777 dir_ = dir_buf;
778 return;
779 } else {
780 // Diagnose ioctl-related failure only if this is the
781 // directory specified via that envvar.
782 if (tmp && tmp == d) {
783 fprintf(stderr, "TEST_IOCTL_FRIENDLY_TMPDIR-specified directory is "
784 "not suitable: %s\n", d.c_str());
785 }
786 rmdir(dir_buf); // ignore failure
787 }
788 } else {
789 // mkdtemp failed: diagnose it, but don't give up.
790 fprintf(stderr, "mkdtemp(%s/...) failed: %s\n", d.c_str(),
791 strerror(errno));
792 }
793 }
794
795 fprintf(stderr, "failed to find an ioctl-friendly temporary directory;"
796 " specify one via the TEST_IOCTL_FRIENDLY_TMPDIR envvar\n");
797 std::abort();
798 #endif
799 }
800
801 ~IoctlFriendlyTmpdir() {
802 rmdir(dir_.c_str());
803 }
804
805 const std::string& name() const {
806 return dir_;
807 }
808
809 private:
810 std::string dir_;
811 };
812
813 #ifndef ROCKSDB_LITE
814 TEST_F(EnvPosixTest, PositionedAppend) {
815 std::unique_ptr<WritableFile> writable_file;
816 EnvOptions options;
817 options.use_direct_writes = true;
818 options.use_mmap_writes = false;
819 IoctlFriendlyTmpdir ift;
820 ASSERT_OK(env_->NewWritableFile(ift.name() + "/f", &writable_file, options));
821 const size_t kBlockSize = 4096;
822 const size_t kDataSize = kPageSize;
823 // Write a page worth of 'a'
824 auto data_ptr = NewAligned(kDataSize, 'a');
825 Slice data_a(data_ptr.get(), kDataSize);
826 ASSERT_OK(writable_file->PositionedAppend(data_a, 0U));
827   // Write a page worth of 'b' right after the first block
828 data_ptr = NewAligned(kDataSize, 'b');
829 Slice data_b(data_ptr.get(), kDataSize);
830 ASSERT_OK(writable_file->PositionedAppend(data_b, kBlockSize));
831 ASSERT_OK(writable_file->Close());
832   // The file now has one block worth of 'a' followed by a page worth of 'b'.
833
834 // Verify the above
835 std::unique_ptr<SequentialFile> seq_file;
836 ASSERT_OK(env_->NewSequentialFile(ift.name() + "/f", &seq_file, options));
837 char scratch[kPageSize * 2];
838 Slice result;
839 ASSERT_OK(seq_file->Read(sizeof(scratch), &result, scratch));
840 ASSERT_EQ(kPageSize + kBlockSize, result.size());
841 ASSERT_EQ('a', result[kBlockSize - 1]);
842 ASSERT_EQ('b', result[kBlockSize]);
843 }
844 #endif // !ROCKSDB_LITE
845
846 // Only works on Linux platforms.
847 TEST_P(EnvPosixTestWithParam, RandomAccessUniqueID) {
848 // Create file.
849 if (env_ == Env::Default()) {
850 EnvOptions soptions;
851 soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
852 IoctlFriendlyTmpdir ift;
853 std::string fname = ift.name() + "/testfile";
854 std::unique_ptr<WritableFile> wfile;
855 ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
856
857 std::unique_ptr<RandomAccessFile> file;
858
859 // Get Unique ID
860 ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
861 size_t id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
862 ASSERT_TRUE(id_size > 0);
863 std::string unique_id1(temp_id, id_size);
864 ASSERT_TRUE(IsUniqueIDValid(unique_id1));
865
866 // Get Unique ID again
867 ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
868 id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
869 ASSERT_TRUE(id_size > 0);
870 std::string unique_id2(temp_id, id_size);
871 ASSERT_TRUE(IsUniqueIDValid(unique_id2));
872
873 // Get Unique ID again after waiting some time.
874 env_->SleepForMicroseconds(1000000);
875 ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
876 id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
877 ASSERT_TRUE(id_size > 0);
878 std::string unique_id3(temp_id, id_size);
879 ASSERT_TRUE(IsUniqueIDValid(unique_id3));
880
881 // Check IDs are the same.
882 ASSERT_EQ(unique_id1, unique_id2);
883 ASSERT_EQ(unique_id2, unique_id3);
884
885 // Delete the file
886 env_->DeleteFile(fname);
887 }
888 }
889
890 // Only works on Linux platforms.
891 #ifdef ROCKSDB_FALLOCATE_PRESENT
892 TEST_P(EnvPosixTestWithParam, AllocateTest) {
893 if (env_ == Env::Default()) {
894 IoctlFriendlyTmpdir ift;
895 std::string fname = ift.name() + "/preallocate_testfile";
896
897 // Try fallocate in a file to see whether the target file system supports
898 // it.
899 // Skip the test if fallocate is not supported.
900 std::string fname_test_fallocate = ift.name() + "/preallocate_testfile_2";
901 int fd = -1;
902 do {
903 fd = open(fname_test_fallocate.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
904 } while (fd < 0 && errno == EINTR);
905 ASSERT_GT(fd, 0);
906
907 int alloc_status = fallocate(fd, 0, 0, 1);
908
909 int err_number = 0;
910 if (alloc_status != 0) {
911 err_number = errno;
912 fprintf(stderr, "Warning: fallocate() fails, %s\n", strerror(err_number));
913 }
914 close(fd);
915 ASSERT_OK(env_->DeleteFile(fname_test_fallocate));
916 if (alloc_status != 0 && err_number == EOPNOTSUPP) {
917 // The filesystem containing the file does not support fallocate
918 return;
919 }
920
921 EnvOptions soptions;
922 soptions.use_mmap_writes = false;
923 soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
924 std::unique_ptr<WritableFile> wfile;
925 ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
926
927 // allocate 100 MB
928 size_t kPreallocateSize = 100 * 1024 * 1024;
929 size_t kBlockSize = 512;
930 size_t kPageSize = 4096;
931 size_t kDataSize = 1024 * 1024;
932 auto data_ptr = NewAligned(kDataSize, 'A');
933 Slice data(data_ptr.get(), kDataSize);
934 wfile->SetPreallocationBlockSize(kPreallocateSize);
935 wfile->PrepareWrite(wfile->GetFileSize(), kDataSize);
936 ASSERT_OK(wfile->Append(data));
937 ASSERT_OK(wfile->Flush());
938
939 struct stat f_stat;
940 ASSERT_EQ(stat(fname.c_str(), &f_stat), 0);
941 ASSERT_EQ((unsigned int)kDataSize, f_stat.st_size);
942 // verify that blocks are preallocated
943 // Note here that we don't check the exact number of blocks preallocated --
944     // we only require that the number of allocated blocks is at least what we
945     // expect.
946     // Some filesystems appear to give us more blocks than we asked for. That's fine.
947 // It might be worth investigating further.
948 ASSERT_LE((unsigned int)(kPreallocateSize / kBlockSize), f_stat.st_blocks);
949
950 // close the file, should deallocate the blocks
951 wfile.reset();
952
953 stat(fname.c_str(), &f_stat);
954 ASSERT_EQ((unsigned int)kDataSize, f_stat.st_size);
955 // verify that preallocated blocks were deallocated on file close
956 // Because the FS might give us more blocks, we add a full page to the size
957     // and expect the number of blocks to be less than or equal to that.
958 ASSERT_GE((f_stat.st_size + kPageSize + kBlockSize - 1) / kBlockSize,
959 (unsigned int)f_stat.st_blocks);
960 }
961 }
962 #endif // ROCKSDB_FALLOCATE_PRESENT
963
964 // Returns true if any string in ss is empty or is a prefix of another.
965 bool HasPrefix(const std::unordered_set<std::string>& ss) {
966 for (const std::string& s: ss) {
967 if (s.empty()) {
968 return true;
969 }
970 for (size_t i = 1; i < s.size(); ++i) {
971 if (ss.count(s.substr(0, i)) != 0) {
972 return true;
973 }
974 }
975 }
976 return false;
977 }
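// For example, {"ab", "abc"} and {"", "x"} both yield true, while
// {"ab", "cd"} yields false; the unique-ID tests below assert that no
// generated ID is a prefix of another.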
978
979 // Only works on Linux and Windows platforms.
980 TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDConcurrent) {
981 if (env_ == Env::Default()) {
982 // Check whether a bunch of concurrently existing files have unique IDs.
983 EnvOptions soptions;
984 soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
985
986 // Create the files
987 IoctlFriendlyTmpdir ift;
988 std::vector<std::string> fnames;
989 for (int i = 0; i < 1000; ++i) {
990 fnames.push_back(ift.name() + "/" + "testfile" + ToString(i));
991
992 // Create file.
993 std::unique_ptr<WritableFile> wfile;
994 ASSERT_OK(env_->NewWritableFile(fnames[i], &wfile, soptions));
995 }
996
997 // Collect and check whether the IDs are unique.
998 std::unordered_set<std::string> ids;
999     for (const std::string& fname : fnames) {
1000 std::unique_ptr<RandomAccessFile> file;
1001 std::string unique_id;
1002 ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
1003 size_t id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
1004 ASSERT_TRUE(id_size > 0);
1005 unique_id = std::string(temp_id, id_size);
1006 ASSERT_TRUE(IsUniqueIDValid(unique_id));
1007
1008 ASSERT_TRUE(ids.count(unique_id) == 0);
1009 ids.insert(unique_id);
1010 }
1011
1012 // Delete the files
1013     for (const std::string& fname : fnames) {
1014 ASSERT_OK(env_->DeleteFile(fname));
1015 }
1016
1017 ASSERT_TRUE(!HasPrefix(ids));
1018 }
1019 }
1020
1021 // Only works on Linux and Windows platforms.
1022 TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDDeletes) {
1023 if (env_ == Env::Default()) {
1024 EnvOptions soptions;
1025 soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
1026
1027 IoctlFriendlyTmpdir ift;
1028 std::string fname = ift.name() + "/" + "testfile";
1029
1030     // Check that after a file is deleted we don't get the same ID again in a
1031     // new file.
1032 std::unordered_set<std::string> ids;
1033 for (int i = 0; i < 1000; ++i) {
1034 // Create file.
1035 {
1036 std::unique_ptr<WritableFile> wfile;
1037 ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
1038 }
1039
1040 // Get Unique ID
1041 std::string unique_id;
1042 {
1043 std::unique_ptr<RandomAccessFile> file;
1044 ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
1045 size_t id_size = file->GetUniqueId(temp_id, MAX_ID_SIZE);
1046 ASSERT_TRUE(id_size > 0);
1047 unique_id = std::string(temp_id, id_size);
1048 }
1049
1050 ASSERT_TRUE(IsUniqueIDValid(unique_id));
1051 ASSERT_TRUE(ids.count(unique_id) == 0);
1052 ids.insert(unique_id);
1053
1054 // Delete the file
1055 ASSERT_OK(env_->DeleteFile(fname));
1056 }
1057
1058 ASSERT_TRUE(!HasPrefix(ids));
1059 }
1060 }
1061
1062 // Only works on Linux platforms.
1063 #ifdef OS_WIN
1064 TEST_P(EnvPosixTestWithParam, DISABLED_InvalidateCache) {
1065 #else
1066 TEST_P(EnvPosixTestWithParam, InvalidateCache) {
1067 #endif
1068 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
1069 EnvOptions soptions;
1070 soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
1071 std::string fname = test::PerThreadDBPath(env_, "testfile");
1072
1073 const size_t kSectorSize = 512;
1074 auto data = NewAligned(kSectorSize, 0);
1075 Slice slice(data.get(), kSectorSize);
1076
1077 // Create file.
1078 {
1079 std::unique_ptr<WritableFile> wfile;
1080 #if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
1081 if (soptions.use_direct_writes) {
1082 soptions.use_direct_writes = false;
1083 }
1084 #endif
1085 ASSERT_OK(env_->NewWritableFile(fname, &wfile, soptions));
1086 ASSERT_OK(wfile->Append(slice));
1087 ASSERT_OK(wfile->InvalidateCache(0, 0));
1088 ASSERT_OK(wfile->Close());
1089 }
1090
1091 // Random Read
1092 {
1093 std::unique_ptr<RandomAccessFile> file;
1094 auto scratch = NewAligned(kSectorSize, 0);
1095 Slice result;
1096 #if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
1097 if (soptions.use_direct_reads) {
1098 soptions.use_direct_reads = false;
1099 }
1100 #endif
1101 ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
1102 ASSERT_OK(file->Read(0, kSectorSize, &result, scratch.get()));
1103 ASSERT_EQ(memcmp(scratch.get(), data.get(), kSectorSize), 0);
1104 ASSERT_OK(file->InvalidateCache(0, 11));
1105 ASSERT_OK(file->InvalidateCache(0, 0));
1106 }
1107
1108 // Sequential Read
1109 {
1110 std::unique_ptr<SequentialFile> file;
1111 auto scratch = NewAligned(kSectorSize, 0);
1112 Slice result;
1113 #if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
1114 if (soptions.use_direct_reads) {
1115 soptions.use_direct_reads = false;
1116 }
1117 #endif
1118 ASSERT_OK(env_->NewSequentialFile(fname, &file, soptions));
1119 if (file->use_direct_io()) {
1120 ASSERT_OK(file->PositionedRead(0, kSectorSize, &result, scratch.get()));
1121 } else {
1122 ASSERT_OK(file->Read(kSectorSize, &result, scratch.get()));
1123 }
1124 ASSERT_EQ(memcmp(scratch.get(), data.get(), kSectorSize), 0);
1125 ASSERT_OK(file->InvalidateCache(0, 11));
1126 ASSERT_OK(file->InvalidateCache(0, 0));
1127 }
1128 // Delete the file
1129 ASSERT_OK(env_->DeleteFile(fname));
1130 rocksdb::SyncPoint::GetInstance()->ClearTrace();
1131 }
1132 #endif // not TRAVIS
1133 #endif // OS_LINUX || OS_WIN
1134
1135 class TestLogger : public Logger {
1136 public:
1137 using Logger::Logv;
1138 void Logv(const char* format, va_list ap) override {
1139 log_count++;
1140
1141 char new_format[550];
1142 std::fill_n(new_format, sizeof(new_format), '2');
1143 {
1144 va_list backup_ap;
1145 va_copy(backup_ap, ap);
1146 int n = vsnprintf(new_format, sizeof(new_format) - 1, format, backup_ap);
1147 // 48 bytes for extra information + bytes allocated
1148
1149       // When n == -1, no terminating zero is expected.
1150 #ifdef OS_WIN
1151 if (n < 0) {
1152 char_0_count++;
1153 }
1154 #endif
1155
1156 if (new_format[0] == '[') {
1157 // "[DEBUG] "
1158 ASSERT_TRUE(n <= 56 + (512 - static_cast<int>(sizeof(struct timeval))));
1159 } else {
1160 ASSERT_TRUE(n <= 48 + (512 - static_cast<int>(sizeof(struct timeval))));
1161 }
1162 va_end(backup_ap);
1163 }
1164
1165 for (size_t i = 0; i < sizeof(new_format); i++) {
1166 if (new_format[i] == 'x') {
1167 char_x_count++;
1168 } else if (new_format[i] == '\0') {
1169 char_0_count++;
1170 }
1171 }
1172 }
1173 int log_count;
1174 int char_x_count;
1175 int char_0_count;
1176 };
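// LogBufferTest below exercises TestLogger: it counts logged lines as well as
// the 'x' delimiters and terminating NUL bytes that survive formatting, which
// is how the test detects where oversized messages were truncated.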
1177
1178 TEST_P(EnvPosixTestWithParam, LogBufferTest) {
1179 TestLogger test_logger;
1180 test_logger.SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
1181 test_logger.log_count = 0;
1182 test_logger.char_x_count = 0;
1183 test_logger.char_0_count = 0;
1184 LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, &test_logger);
1185 LogBuffer log_buffer_debug(DEBUG_LEVEL, &test_logger);
1186
1187 char bytes200[200];
1188 std::fill_n(bytes200, sizeof(bytes200), '1');
1189 bytes200[sizeof(bytes200) - 1] = '\0';
1190 char bytes600[600];
1191 std::fill_n(bytes600, sizeof(bytes600), '1');
1192 bytes600[sizeof(bytes600) - 1] = '\0';
1193 char bytes9000[9000];
1194 std::fill_n(bytes9000, sizeof(bytes9000), '1');
1195 bytes9000[sizeof(bytes9000) - 1] = '\0';
1196
1197 ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes200);
1198 ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes600);
1199 ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx%sx", bytes200, bytes200, bytes200);
1200 ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes200, bytes600);
1201 ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes600, bytes9000);
1202
1203 ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx", bytes200);
1204 test_logger.SetInfoLogLevel(DEBUG_LEVEL);
1205 ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx%sx%sx", bytes600, bytes9000,
1206 bytes200);
1207
1208 ASSERT_EQ(0, test_logger.log_count);
1209 log_buffer.FlushBufferToLog();
1210 log_buffer_debug.FlushBufferToLog();
1211 ASSERT_EQ(6, test_logger.log_count);
1212 ASSERT_EQ(6, test_logger.char_0_count);
1213 ASSERT_EQ(10, test_logger.char_x_count);
1214 }
1215
1216 class TestLogger2 : public Logger {
1217 public:
1218 explicit TestLogger2(size_t max_log_size) : max_log_size_(max_log_size) {}
1219 using Logger::Logv;
1220 void Logv(const char* format, va_list ap) override {
1221 char new_format[2000];
1222 std::fill_n(new_format, sizeof(new_format), '2');
1223 {
1224 va_list backup_ap;
1225 va_copy(backup_ap, ap);
1226 int n = vsnprintf(new_format, sizeof(new_format) - 1, format, backup_ap);
1227 // 48 bytes for extra information + bytes allocated
1228 ASSERT_TRUE(
1229 n <= 48 + static_cast<int>(max_log_size_ - sizeof(struct timeval)));
1230 ASSERT_TRUE(n > static_cast<int>(max_log_size_ - sizeof(struct timeval)));
1231 va_end(backup_ap);
1232 }
1233 }
1234 size_t max_log_size_;
1235 };
1236
1237 TEST_P(EnvPosixTestWithParam, LogBufferMaxSizeTest) {
1238 char bytes9000[9000];
1239 std::fill_n(bytes9000, sizeof(bytes9000), '1');
1240 bytes9000[sizeof(bytes9000) - 1] = '\0';
1241
1242 for (size_t max_log_size = 256; max_log_size <= 1024;
1243 max_log_size += 1024 - 256) {
1244 TestLogger2 test_logger(max_log_size);
1245 test_logger.SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
1246 LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, &test_logger);
1247 ROCKS_LOG_BUFFER_MAX_SZ(&log_buffer, max_log_size, "%s", bytes9000);
1248 log_buffer.FlushBufferToLog();
1249 }
1250 }
1251
1252 TEST_P(EnvPosixTestWithParam, Preallocation) {
1253 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
1254 const std::string src = test::PerThreadDBPath(env_, "testfile");
1255 std::unique_ptr<WritableFile> srcfile;
1256 EnvOptions soptions;
1257 soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
1258 #if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX) && !defined(OS_OPENBSD) && !defined(OS_FREEBSD)
1259 if (soptions.use_direct_writes) {
1260 rocksdb::SyncPoint::GetInstance()->SetCallBack(
1261 "NewWritableFile:O_DIRECT", [&](void* arg) {
1262 int* val = static_cast<int*>(arg);
1263 *val &= ~O_DIRECT;
1264 });
1265 }
1266 #endif
1267 ASSERT_OK(env_->NewWritableFile(src, &srcfile, soptions));
1268 srcfile->SetPreallocationBlockSize(1024 * 1024);
1269
1270 // No writes should mean no preallocation
1271 size_t block_size, last_allocated_block;
1272 srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
1273 ASSERT_EQ(last_allocated_block, 0UL);
1274
1275 // Small write should preallocate one block
1276 size_t kStrSize = 4096;
1277 auto data = NewAligned(kStrSize, 'A');
1278 Slice str(data.get(), kStrSize);
1279 srcfile->PrepareWrite(srcfile->GetFileSize(), kStrSize);
1280 srcfile->Append(str);
1281 srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
1282 ASSERT_EQ(last_allocated_block, 1UL);
1283
1284 // Write an entire preallocation block, make sure we increased by two.
1285 {
1286 auto buf_ptr = NewAligned(block_size, ' ');
1287 Slice buf(buf_ptr.get(), block_size);
1288 srcfile->PrepareWrite(srcfile->GetFileSize(), block_size);
1289 srcfile->Append(buf);
1290 srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
1291 ASSERT_EQ(last_allocated_block, 2UL);
1292 }
1293
1294 // Write five more blocks at once, ensure we're where we need to be.
1295 {
1296 auto buf_ptr = NewAligned(block_size * 5, ' ');
1297 Slice buf = Slice(buf_ptr.get(), block_size * 5);
1298 srcfile->PrepareWrite(srcfile->GetFileSize(), buf.size());
1299 srcfile->Append(buf);
1300 srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
1301 ASSERT_EQ(last_allocated_block, 7UL);
1302 }
1303 rocksdb::SyncPoint::GetInstance()->ClearTrace();
1304 }
1305
1306 // Test that the two ways to get children file attributes (in bulk or
1307 // individually) behave consistently.
1308 TEST_P(EnvPosixTestWithParam, ConsistentChildrenAttributes) {
1309 rocksdb::SyncPoint::GetInstance()->EnableProcessing();
1310 EnvOptions soptions;
1311 soptions.use_direct_reads = soptions.use_direct_writes = direct_io_;
1312 const int kNumChildren = 10;
1313
1314 std::string data;
1315 for (int i = 0; i < kNumChildren; ++i) {
1316 const std::string path =
1317 test::TmpDir(env_) + "/" + "testfile_" + std::to_string(i);
1318 std::unique_ptr<WritableFile> file;
1319 #if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX) && !defined(OS_OPENBSD) && !defined(OS_FREEBSD)
1320 if (soptions.use_direct_writes) {
1321 rocksdb::SyncPoint::GetInstance()->SetCallBack(
1322 "NewWritableFile:O_DIRECT", [&](void* arg) {
1323 int* val = static_cast<int*>(arg);
1324 *val &= ~O_DIRECT;
1325 });
1326 }
1327 #endif
1328 ASSERT_OK(env_->NewWritableFile(path, &file, soptions));
1329 auto buf_ptr = NewAligned(data.size(), 'T');
1330 Slice buf(buf_ptr.get(), data.size());
1331 file->Append(buf);
1332 data.append(std::string(4096, 'T'));
1333 }
1334
1335 std::vector<Env::FileAttributes> file_attrs;
1336 ASSERT_OK(env_->GetChildrenFileAttributes(test::TmpDir(env_), &file_attrs));
1337 for (int i = 0; i < kNumChildren; ++i) {
1338 const std::string name = "testfile_" + std::to_string(i);
1339 const std::string path = test::TmpDir(env_) + "/" + name;
1340
1341 auto file_attrs_iter = std::find_if(
1342 file_attrs.begin(), file_attrs.end(),
1343 [&name](const Env::FileAttributes& fm) { return fm.name == name; });
1344 ASSERT_TRUE(file_attrs_iter != file_attrs.end());
1345 uint64_t size;
1346 ASSERT_OK(env_->GetFileSize(path, &size));
1347 ASSERT_EQ(size, 4096 * i);
1348 ASSERT_EQ(size, file_attrs_iter->size_bytes);
1349 }
1350 rocksdb::SyncPoint::GetInstance()->ClearTrace();
1351 }
1352
1353 // Test that WritableFileWrapper forwards all calls to WritableFile.
1354 TEST_P(EnvPosixTestWithParam, WritableFileWrapper) {
1355 class Base : public WritableFile {
1356 public:
1357 mutable int *step_;
1358
1359 void inc(int x) const {
1360 EXPECT_EQ(x, (*step_)++);
1361 }
1362
1363 explicit Base(int* step) : step_(step) {
1364 inc(0);
1365 }
1366
1367 Status Append(const Slice& /*data*/) override {
1368 inc(1);
1369 return Status::OK();
1370 }
1371
1372 Status PositionedAppend(const Slice& /*data*/,
1373 uint64_t /*offset*/) override {
1374 inc(2);
1375 return Status::OK();
1376 }
1377
1378 Status Truncate(uint64_t /*size*/) override {
1379 inc(3);
1380 return Status::OK();
1381 }
1382
1383 Status Close() override {
1384 inc(4);
1385 return Status::OK();
1386 }
1387
1388 Status Flush() override {
1389 inc(5);
1390 return Status::OK();
1391 }
1392
1393 Status Sync() override {
1394 inc(6);
1395 return Status::OK();
1396 }
1397
1398 Status Fsync() override {
1399 inc(7);
1400 return Status::OK();
1401 }
1402
1403 bool IsSyncThreadSafe() const override {
1404 inc(8);
1405 return true;
1406 }
1407
1408 bool use_direct_io() const override {
1409 inc(9);
1410 return true;
1411 }
1412
1413 size_t GetRequiredBufferAlignment() const override {
1414 inc(10);
1415 return 0;
1416 }
1417
1418 void SetIOPriority(Env::IOPriority /*pri*/) override { inc(11); }
1419
1420 Env::IOPriority GetIOPriority() override {
1421 inc(12);
1422 return Env::IOPriority::IO_LOW;
1423 }
1424
1425 void SetWriteLifeTimeHint(Env::WriteLifeTimeHint /*hint*/) override {
1426 inc(13);
1427 }
1428
1429 Env::WriteLifeTimeHint GetWriteLifeTimeHint() override {
1430 inc(14);
1431 return Env::WriteLifeTimeHint::WLTH_NOT_SET;
1432 }
1433
1434 uint64_t GetFileSize() override {
1435 inc(15);
1436 return 0;
1437 }
1438
1439 void SetPreallocationBlockSize(size_t /*size*/) override { inc(16); }
1440
1441 void GetPreallocationStatus(size_t* /*block_size*/,
1442 size_t* /*last_allocated_block*/) override {
1443 inc(17);
1444 }
1445
1446 size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override {
1447 inc(18);
1448 return 0;
1449 }
1450
1451 Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override {
1452 inc(19);
1453 return Status::OK();
1454 }
1455
1456 Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) override {
1457 inc(20);
1458 return Status::OK();
1459 }
1460
1461 void PrepareWrite(size_t /*offset*/, size_t /*len*/) override { inc(21); }
1462
1463 Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) override {
1464 inc(22);
1465 return Status::OK();
1466 }
1467
1468 public:
1469 ~Base() override { inc(23); }
1470 };
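  // Each overridden method asserts its position in the expected call sequence
  // via inc(): the constructor is step 0 and the destructor step 23, so the
  // final EXPECT_EQ(24, step) confirms every call was forwarded exactly once,
  // in order.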
1471
1472 class Wrapper : public WritableFileWrapper {
1473 public:
1474 explicit Wrapper(WritableFile* target) : WritableFileWrapper(target) {}
1475 };
1476
1477 int step = 0;
1478
1479 {
1480 Base b(&step);
1481 Wrapper w(&b);
1482 w.Append(Slice());
1483 w.PositionedAppend(Slice(), 0);
1484 w.Truncate(0);
1485 w.Close();
1486 w.Flush();
1487 w.Sync();
1488 w.Fsync();
1489 w.IsSyncThreadSafe();
1490 w.use_direct_io();
1491 w.GetRequiredBufferAlignment();
1492 w.SetIOPriority(Env::IOPriority::IO_HIGH);
1493 w.GetIOPriority();
1494 w.SetWriteLifeTimeHint(Env::WriteLifeTimeHint::WLTH_NOT_SET);
1495 w.GetWriteLifeTimeHint();
1496 w.GetFileSize();
1497 w.SetPreallocationBlockSize(0);
1498 w.GetPreallocationStatus(nullptr, nullptr);
1499 w.GetUniqueId(nullptr, 0);
1500 w.InvalidateCache(0, 0);
1501 w.RangeSync(0, 0);
1502 w.PrepareWrite(0, 0);
1503 w.Allocate(0, 0);
1504 }
1505
1506 EXPECT_EQ(24, step);
1507 }
1508
1509 TEST_P(EnvPosixTestWithParam, PosixRandomRWFile) {
1510 const std::string path = test::PerThreadDBPath(env_, "random_rw_file");
1511
1512 env_->DeleteFile(path);
1513
1514 std::unique_ptr<RandomRWFile> file;
1515
1516 // Cannot open non-existing file.
1517 ASSERT_NOK(env_->NewRandomRWFile(path, &file, EnvOptions()));
1518
1519   // Create the file using WritableFile.
1520 {
1521 std::unique_ptr<WritableFile> wf;
1522 ASSERT_OK(env_->NewWritableFile(path, &wf, EnvOptions()));
1523 }
1524
1525 ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
1526
1527 char buf[10000];
1528 Slice read_res;
1529
1530 ASSERT_OK(file->Write(0, "ABCD"));
1531 ASSERT_OK(file->Read(0, 10, &read_res, buf));
1532 ASSERT_EQ(read_res.ToString(), "ABCD");
1533
1534 ASSERT_OK(file->Write(2, "XXXX"));
1535 ASSERT_OK(file->Read(0, 10, &read_res, buf));
1536 ASSERT_EQ(read_res.ToString(), "ABXXXX");
1537
1538 ASSERT_OK(file->Write(10, "ZZZ"));
1539 ASSERT_OK(file->Read(10, 10, &read_res, buf));
1540 ASSERT_EQ(read_res.ToString(), "ZZZ");
1541
1542 ASSERT_OK(file->Write(11, "Y"));
1543 ASSERT_OK(file->Read(10, 10, &read_res, buf));
1544 ASSERT_EQ(read_res.ToString(), "ZYZ");
1545
1546 ASSERT_OK(file->Write(200, "FFFFF"));
1547 ASSERT_OK(file->Read(200, 10, &read_res, buf));
1548 ASSERT_EQ(read_res.ToString(), "FFFFF");
1549
1550 ASSERT_OK(file->Write(205, "XXXX"));
1551 ASSERT_OK(file->Read(200, 10, &read_res, buf));
1552 ASSERT_EQ(read_res.ToString(), "FFFFFXXXX");
1553
1554 ASSERT_OK(file->Write(5, "QQQQ"));
1555 ASSERT_OK(file->Read(0, 9, &read_res, buf));
1556 ASSERT_EQ(read_res.ToString(), "ABXXXQQQQ");
1557
1558 ASSERT_OK(file->Read(2, 4, &read_res, buf));
1559 ASSERT_EQ(read_res.ToString(), "XXXQ");
1560
1561 // Close file and reopen it
1562 file->Close();
1563 ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
1564
1565 ASSERT_OK(file->Read(0, 9, &read_res, buf));
1566 ASSERT_EQ(read_res.ToString(), "ABXXXQQQQ");
1567
1568 ASSERT_OK(file->Read(10, 3, &read_res, buf));
1569 ASSERT_EQ(read_res.ToString(), "ZYZ");
1570
1571 ASSERT_OK(file->Read(200, 9, &read_res, buf));
1572 ASSERT_EQ(read_res.ToString(), "FFFFFXXXX");
1573
1574 ASSERT_OK(file->Write(4, "TTTTTTTTTTTTTTTT"));
1575 ASSERT_OK(file->Read(0, 10, &read_res, buf));
1576 ASSERT_EQ(read_res.ToString(), "ABXXTTTTTT");
1577
1578 // Clean up
1579 env_->DeleteFile(path);
1580 }
1581
1582 class RandomRWFileWithMirrorString {
1583 public:
1584 explicit RandomRWFileWithMirrorString(RandomRWFile* _file) : file_(_file) {}
1585
1586 void Write(size_t offset, const std::string& data) {
1587 // Write to mirror string
1588 StringWrite(offset, data);
1589
1590 // Write to file
1591 Status s = file_->Write(offset, data);
1592 ASSERT_OK(s) << s.ToString();
1593 }
1594
1595 void Read(size_t offset = 0, size_t n = 1000000) {
1596 Slice str_res(nullptr, 0);
1597 if (offset < file_mirror_.size()) {
1598 size_t str_res_sz = std::min(file_mirror_.size() - offset, n);
1599 str_res = Slice(file_mirror_.data() + offset, str_res_sz);
1600 StopSliceAtNull(&str_res);
1601 }
1602
1603 Slice file_res;
1604 Status s = file_->Read(offset, n, &file_res, buf_);
1605 ASSERT_OK(s) << s.ToString();
1606 StopSliceAtNull(&file_res);
1607
1608 ASSERT_EQ(str_res.ToString(), file_res.ToString()) << offset << " " << n;
1609 }
1610
1611 void SetFile(RandomRWFile* _file) { file_ = _file; }
1612
1613 private:
1614 void StringWrite(size_t offset, const std::string& src) {
1615 if (offset + src.size() > file_mirror_.size()) {
1616 file_mirror_.resize(offset + src.size(), '\0');
1617 }
1618
1619 char* pos = const_cast<char*>(file_mirror_.data() + offset);
1620 memcpy(pos, src.data(), src.size());
1621 }
1622
1623 void StopSliceAtNull(Slice* slc) {
1624 for (size_t i = 0; i < slc->size(); i++) {
1625 if ((*slc)[i] == '\0') {
1626 *slc = Slice(slc->data(), i);
1627 break;
1628 }
1629 }
1630 }
1631
1632 char buf_[10000];
1633 RandomRWFile* file_;
1634 std::string file_mirror_;
1635 };
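// The mirror string models the expected file contents; both the string and
// the data read back from the file are truncated at the first NUL via
// StopSliceAtNull, since unwritten gaps in the file read back as zero bytes.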
1636
1637 TEST_P(EnvPosixTestWithParam, PosixRandomRWFileRandomized) {
1638 const std::string path = test::PerThreadDBPath(env_, "random_rw_file_rand");
1639 env_->DeleteFile(path);
1640
1641 std::unique_ptr<RandomRWFile> file;
1642
1643 #ifdef OS_LINUX
1644 // Cannot open non-existing file.
1645 ASSERT_NOK(env_->NewRandomRWFile(path, &file, EnvOptions()));
1646 #endif
1647
1648   // Create the file using WritableFile.
1649 {
1650 std::unique_ptr<WritableFile> wf;
1651 ASSERT_OK(env_->NewWritableFile(path, &wf, EnvOptions()));
1652 }
1653
1654 ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
1655 RandomRWFileWithMirrorString file_with_mirror(file.get());
1656
1657 Random rnd(301);
1658 std::string buf;
1659 for (int i = 0; i < 10000; i++) {
1660     // Generate random data.
1661 test::RandomString(&rnd, 10, &buf);
1662
1663 // Pick random offset for write
1664 size_t write_off = rnd.Next() % 1000;
1665 file_with_mirror.Write(write_off, buf);
1666
1667 // Pick random offset for read
1668 size_t read_off = rnd.Next() % 1000;
1669 size_t read_sz = rnd.Next() % 20;
1670 file_with_mirror.Read(read_off, read_sz);
1671
1672 if (i % 500 == 0) {
1673 // Reopen the file every 500 iters
1674 ASSERT_OK(env_->NewRandomRWFile(path, &file, EnvOptions()));
1675 file_with_mirror.SetFile(file.get());
1676 }
1677 }
1678
1679 // clean up
1680 env_->DeleteFile(path);
1681 }
1682
1683 class TestEnv : public EnvWrapper {
1684 public:
1685 explicit TestEnv() : EnvWrapper(Env::Default()),
1686 close_count(0) { }
1687
1688 class TestLogger : public Logger {
1689 public:
1690 using Logger::Logv;
1691 TestLogger(TestEnv* env_ptr) : Logger() { env = env_ptr; }
1692 ~TestLogger() override {
1693 if (!closed_) {
1694 CloseHelper();
1695 }
1696 }
1697     void Logv(const char* /*format*/, va_list /*ap*/) override {}
1698
1699 protected:
1700 Status CloseImpl() override { return CloseHelper(); }
1701
1702 private:
1703 Status CloseHelper() {
1704       env->CloseCountInc();
1705 return Status::OK();
1706 }
1707 TestEnv* env;
1708 };
1709
1710 void CloseCountInc() { close_count++; }
1711
1712 int GetCloseCount() { return close_count; }
1713
1714 Status NewLogger(const std::string& /*fname*/,
1715 std::shared_ptr<Logger>* result) override {
1716 result->reset(new TestLogger(this));
1717 return Status::OK();
1718 }
1719
1720 private:
1721 int close_count;
1722 };
1723
1724 class EnvTest : public testing::Test {};
1725
1726 TEST_F(EnvTest, Close) {
1727 TestEnv* env = new TestEnv();
1728 std::shared_ptr<Logger> logger;
1729 Status s;
1730
1731 s = env->NewLogger("", &logger);
1732 ASSERT_EQ(s, Status::OK());
1733 logger.get()->Close();
1734 ASSERT_EQ(env->GetCloseCount(), 1);
1735 // Call Close() again. CloseHelper() should not be called again
1736 logger.get()->Close();
1737 ASSERT_EQ(env->GetCloseCount(), 1);
1738 logger.reset();
1739 ASSERT_EQ(env->GetCloseCount(), 1);
1740
1741 s = env->NewLogger("", &logger);
1742 ASSERT_EQ(s, Status::OK());
1743 logger.reset();
1744 ASSERT_EQ(env->GetCloseCount(), 2);
1745
1746 delete env;
1747 }
1748
1749 INSTANTIATE_TEST_CASE_P(DefaultEnvWithoutDirectIO, EnvPosixTestWithParam,
1750 ::testing::Values(std::pair<Env*, bool>(Env::Default(),
1751 false)));
1752 #if !defined(ROCKSDB_LITE)
1753 INSTANTIATE_TEST_CASE_P(DefaultEnvWithDirectIO, EnvPosixTestWithParam,
1754 ::testing::Values(std::pair<Env*, bool>(Env::Default(),
1755 true)));
1756 #endif // !defined(ROCKSDB_LITE)
1757
1758 #if !defined(ROCKSDB_LITE) && !defined(OS_WIN)
1759 static std::unique_ptr<Env> chroot_env(
1760 NewChrootEnv(Env::Default(), test::TmpDir(Env::Default())));
1761 INSTANTIATE_TEST_CASE_P(
1762 ChrootEnvWithoutDirectIO, EnvPosixTestWithParam,
1763 ::testing::Values(std::pair<Env*, bool>(chroot_env.get(), false)));
1764 INSTANTIATE_TEST_CASE_P(
1765 ChrootEnvWithDirectIO, EnvPosixTestWithParam,
1766 ::testing::Values(std::pair<Env*, bool>(chroot_env.get(), true)));
1767 #endif // !defined(ROCKSDB_LITE) && !defined(OS_WIN)
1768
1769 } // namespace rocksdb
1770
1771 int main(int argc, char** argv) {
1772 ::testing::InitGoogleTest(&argc, argv);
1773 return RUN_ALL_TESTS();
1774 }