// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "rocksdb/env.h"

#include <thread>

#include "options/db_options.h"
#include "port/port.h"
#include "port/sys_time.h"
#include "rocksdb/options.h"
#include "util/arena.h"
#include "util/autovector.h"
25 std::string
Env::PriorityToString(Env::Priority priority
) {
27 case Env::Priority::BOTTOM
:
29 case Env::Priority::LOW
:
31 case Env::Priority::HIGH
:
33 case Env::Priority::USER
:
35 case Env::Priority::TOTAL
:
41 uint64_t Env::GetThreadID() const {
42 std::hash
<std::thread::id
> hasher
;
43 return hasher(std::this_thread::get_id());
46 Status
Env::ReuseWritableFile(const std::string
& fname
,
47 const std::string
& old_fname
,
48 std::unique_ptr
<WritableFile
>* result
,
49 const EnvOptions
& options
) {
50 Status s
= RenameFile(old_fname
, fname
);
54 return NewWritableFile(fname
, result
, options
);
57 Status
Env::GetChildrenFileAttributes(const std::string
& dir
,
58 std::vector
<FileAttributes
>* result
) {
59 assert(result
!= nullptr);
60 std::vector
<std::string
> child_fnames
;
61 Status s
= GetChildren(dir
, &child_fnames
);
65 result
->resize(child_fnames
.size());
66 size_t result_size
= 0;
67 for (size_t i
= 0; i
< child_fnames
.size(); ++i
) {
68 const std::string path
= dir
+ "/" + child_fnames
[i
];
69 if (!(s
= GetFileSize(path
, &(*result
)[result_size
].size_bytes
)).ok()) {
70 if (FileExists(path
).IsNotFound()) {
71 // The file may have been deleted since we listed the directory
76 (*result
)[result_size
].name
= std::move(child_fnames
[i
]);
79 result
->resize(result_size
);
83 SequentialFile::~SequentialFile() {
86 RandomAccessFile::~RandomAccessFile() {
89 WritableFile::~WritableFile() {
92 MemoryMappedFileBuffer::~MemoryMappedFileBuffer() {}
96 Status
Logger::Close() {
105 Status
Logger::CloseImpl() { return Status::NotSupported(); }
107 FileLock::~FileLock() {
110 void LogFlush(Logger
*info_log
) {
116 static void Logv(Logger
*info_log
, const char* format
, va_list ap
) {
117 if (info_log
&& info_log
->GetInfoLogLevel() <= InfoLogLevel::INFO_LEVEL
) {
118 info_log
->Logv(InfoLogLevel::INFO_LEVEL
, format
, ap
);
122 void Log(Logger
* info_log
, const char* format
, ...) {
124 va_start(ap
, format
);
125 Logv(info_log
, format
, ap
);
129 void Logger::Logv(const InfoLogLevel log_level
, const char* format
, va_list ap
) {
130 static const char* kInfoLogLevelNames
[5] = { "DEBUG", "INFO", "WARN",
132 if (log_level
< log_level_
) {
136 if (log_level
== InfoLogLevel::INFO_LEVEL
) {
137 // Doesn't print log level if it is INFO level.
138 // This is to avoid unexpected performance regression after we add
139 // the feature of log level. All the logs before we add the feature
140 // are INFO level. We don't want to add extra costs to those existing
143 } else if (log_level
== InfoLogLevel::HEADER_LEVEL
) {
144 LogHeader(format
, ap
);
146 char new_format
[500];
147 snprintf(new_format
, sizeof(new_format
) - 1, "[%s] %s",
148 kInfoLogLevelNames
[log_level
], format
);
149 Logv(new_format
, ap
);
153 static void Logv(const InfoLogLevel log_level
, Logger
*info_log
, const char *format
, va_list ap
) {
154 if (info_log
&& info_log
->GetInfoLogLevel() <= log_level
) {
155 if (log_level
== InfoLogLevel::HEADER_LEVEL
) {
156 info_log
->LogHeader(format
, ap
);
158 info_log
->Logv(log_level
, format
, ap
);
163 void Log(const InfoLogLevel log_level
, Logger
* info_log
, const char* format
,
166 va_start(ap
, format
);
167 Logv(log_level
, info_log
, format
, ap
);
171 static void Headerv(Logger
*info_log
, const char *format
, va_list ap
) {
173 info_log
->LogHeader(format
, ap
);
177 void Header(Logger
* info_log
, const char* format
, ...) {
179 va_start(ap
, format
);
180 Headerv(info_log
, format
, ap
);
184 static void Debugv(Logger
* info_log
, const char* format
, va_list ap
) {
185 if (info_log
&& info_log
->GetInfoLogLevel() <= InfoLogLevel::DEBUG_LEVEL
) {
186 info_log
->Logv(InfoLogLevel::DEBUG_LEVEL
, format
, ap
);
190 void Debug(Logger
* info_log
, const char* format
, ...) {
192 va_start(ap
, format
);
193 Debugv(info_log
, format
, ap
);
197 static void Infov(Logger
* info_log
, const char* format
, va_list ap
) {
198 if (info_log
&& info_log
->GetInfoLogLevel() <= InfoLogLevel::INFO_LEVEL
) {
199 info_log
->Logv(InfoLogLevel::INFO_LEVEL
, format
, ap
);
203 void Info(Logger
* info_log
, const char* format
, ...) {
205 va_start(ap
, format
);
206 Infov(info_log
, format
, ap
);
210 static void Warnv(Logger
* info_log
, const char* format
, va_list ap
) {
211 if (info_log
&& info_log
->GetInfoLogLevel() <= InfoLogLevel::WARN_LEVEL
) {
212 info_log
->Logv(InfoLogLevel::WARN_LEVEL
, format
, ap
);
216 void Warn(Logger
* info_log
, const char* format
, ...) {
218 va_start(ap
, format
);
219 Warnv(info_log
, format
, ap
);
223 static void Errorv(Logger
* info_log
, const char* format
, va_list ap
) {
224 if (info_log
&& info_log
->GetInfoLogLevel() <= InfoLogLevel::ERROR_LEVEL
) {
225 info_log
->Logv(InfoLogLevel::ERROR_LEVEL
, format
, ap
);
229 void Error(Logger
* info_log
, const char* format
, ...) {
231 va_start(ap
, format
);
232 Errorv(info_log
, format
, ap
);
236 static void Fatalv(Logger
* info_log
, const char* format
, va_list ap
) {
237 if (info_log
&& info_log
->GetInfoLogLevel() <= InfoLogLevel::FATAL_LEVEL
) {
238 info_log
->Logv(InfoLogLevel::FATAL_LEVEL
, format
, ap
);
242 void Fatal(Logger
* info_log
, const char* format
, ...) {
244 va_start(ap
, format
);
245 Fatalv(info_log
, format
, ap
);
249 void LogFlush(const std::shared_ptr
<Logger
>& info_log
) {
250 LogFlush(info_log
.get());
253 void Log(const InfoLogLevel log_level
, const std::shared_ptr
<Logger
>& info_log
,
254 const char* format
, ...) {
256 va_start(ap
, format
);
257 Logv(log_level
, info_log
.get(), format
, ap
);
261 void Header(const std::shared_ptr
<Logger
>& info_log
, const char* format
, ...) {
263 va_start(ap
, format
);
264 Headerv(info_log
.get(), format
, ap
);
268 void Debug(const std::shared_ptr
<Logger
>& info_log
, const char* format
, ...) {
270 va_start(ap
, format
);
271 Debugv(info_log
.get(), format
, ap
);
275 void Info(const std::shared_ptr
<Logger
>& info_log
, const char* format
, ...) {
277 va_start(ap
, format
);
278 Infov(info_log
.get(), format
, ap
);
282 void Warn(const std::shared_ptr
<Logger
>& info_log
, const char* format
, ...) {
284 va_start(ap
, format
);
285 Warnv(info_log
.get(), format
, ap
);
289 void Error(const std::shared_ptr
<Logger
>& info_log
, const char* format
, ...) {
291 va_start(ap
, format
);
292 Errorv(info_log
.get(), format
, ap
);
296 void Fatal(const std::shared_ptr
<Logger
>& info_log
, const char* format
, ...) {
298 va_start(ap
, format
);
299 Fatalv(info_log
.get(), format
, ap
);
303 void Log(const std::shared_ptr
<Logger
>& info_log
, const char* format
, ...) {
305 va_start(ap
, format
);
306 Logv(info_log
.get(), format
, ap
);
310 Status
WriteStringToFile(Env
* env
, const Slice
& data
, const std::string
& fname
,
312 std::unique_ptr
<WritableFile
> file
;
314 Status s
= env
->NewWritableFile(fname
, &file
, soptions
);
318 s
= file
->Append(data
);
319 if (s
.ok() && should_sync
) {
323 env
->DeleteFile(fname
);
328 Status
ReadFileToString(Env
* env
, const std::string
& fname
, std::string
* data
) {
331 std::unique_ptr
<SequentialFile
> file
;
332 Status s
= env
->NewSequentialFile(fname
, &file
, soptions
);
336 static const int kBufferSize
= 8192;
337 char* space
= new char[kBufferSize
];
340 s
= file
->Read(kBufferSize
, &fragment
, space
);
344 data
->append(fragment
.data(), fragment
.size());
345 if (fragment
.empty()) {
353 EnvWrapper::~EnvWrapper() {
356 namespace { // anonymous namespace
358 void AssignEnvOptions(EnvOptions
* env_options
, const DBOptions
& options
) {
359 env_options
->use_mmap_reads
= options
.allow_mmap_reads
;
360 env_options
->use_mmap_writes
= options
.allow_mmap_writes
;
361 env_options
->use_direct_reads
= options
.use_direct_reads
;
362 env_options
->set_fd_cloexec
= options
.is_fd_close_on_exec
;
363 env_options
->bytes_per_sync
= options
.bytes_per_sync
;
364 env_options
->compaction_readahead_size
= options
.compaction_readahead_size
;
365 env_options
->random_access_max_buffer_size
=
366 options
.random_access_max_buffer_size
;
367 env_options
->rate_limiter
= options
.rate_limiter
.get();
368 env_options
->writable_file_max_buffer_size
=
369 options
.writable_file_max_buffer_size
;
370 env_options
->allow_fallocate
= options
.allow_fallocate
;
375 EnvOptions
Env::OptimizeForLogWrite(const EnvOptions
& env_options
,
376 const DBOptions
& db_options
) const {
377 EnvOptions
optimized_env_options(env_options
);
378 optimized_env_options
.bytes_per_sync
= db_options
.wal_bytes_per_sync
;
379 optimized_env_options
.writable_file_max_buffer_size
=
380 db_options
.writable_file_max_buffer_size
;
381 return optimized_env_options
;
384 EnvOptions
Env::OptimizeForManifestWrite(const EnvOptions
& env_options
) const {
388 EnvOptions
Env::OptimizeForLogRead(const EnvOptions
& env_options
) const {
389 EnvOptions
optimized_env_options(env_options
);
390 optimized_env_options
.use_direct_reads
= false;
391 return optimized_env_options
;
394 EnvOptions
Env::OptimizeForManifestRead(const EnvOptions
& env_options
) const {
395 EnvOptions
optimized_env_options(env_options
);
396 optimized_env_options
.use_direct_reads
= false;
397 return optimized_env_options
;
400 EnvOptions
Env::OptimizeForCompactionTableWrite(
401 const EnvOptions
& env_options
, const ImmutableDBOptions
& db_options
) const {
402 EnvOptions
optimized_env_options(env_options
);
403 optimized_env_options
.use_direct_writes
=
404 db_options
.use_direct_io_for_flush_and_compaction
;
405 return optimized_env_options
;
408 EnvOptions
Env::OptimizeForCompactionTableRead(
409 const EnvOptions
& env_options
, const ImmutableDBOptions
& db_options
) const {
410 EnvOptions
optimized_env_options(env_options
);
411 optimized_env_options
.use_direct_reads
= db_options
.use_direct_reads
;
412 return optimized_env_options
;
415 EnvOptions::EnvOptions(const DBOptions
& options
) {
416 AssignEnvOptions(this, options
);
419 EnvOptions::EnvOptions() {
421 AssignEnvOptions(this, options
);
425 } // namespace rocksdb