#ifndef ROCKSDB_HDFS_FILE_C
#define ROCKSDB_HDFS_FILE_C

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

#include <algorithm>
#include <cerrno>
#include <cstring>
#include <iostream>
#include <memory>
#include <sstream>

#include "rocksdb/status.h"
#include "util/logging.h"
#include "util/string_util.h"

#define HDFS_EXISTS 0
// Map an errno value from a failed libhdfs call onto the matching
// rocksdb::Status category.
//
// context     human-readable operation description (usually a file name),
//             embedded in the returned Status message.
// err_number  errno captured immediately after the failing call.
//
// ENOSPC and ENOENT get dedicated Status subcodes so callers can react to
// disk-full and missing-path conditions; everything else is a generic
// IOError.
static Status IOError(const std::string& context, int err_number) {
  if (err_number == ENOSPC) {
    return Status::NoSpace(context, strerror(err_number));
  }
  if (err_number == ENOENT) {
    return Status::PathNotFound(context, strerror(err_number));
  }
  return Status::IOError(context, strerror(err_number));
}
// assume that there is one global logger for now. It is not thread-safe,
filename_.c_str());
const char* src = data.data();
size_t left = data.size();
- size_t ret = hdfsWrite(fileSys_, hfile_, src, left);
+ size_t ret = hdfsWrite(fileSys_, hfile_, src, static_cast<tSize>(left));
ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Appended %s\n",
filename_.c_str());
if (ret != left) {
// This is used by HdfsLogger to write data to the debug log file
virtual Status Append(const char* src, size_t size) {
- if (hdfsWrite(fileSys_, hfile_, src, size) != (tSize)size) {
+ if (hdfsWrite(fileSys_, hfile_, src, static_cast<tSize>(size)) !=
+ static_cast<tSize>(size)) {
return IOError(filename_, errno);
}
return Status::OK();
// Shared shutdown path for HdfsLogger (called from Close() and the
// destructor). Detaches the global `mylog` pointer if it refers to this
// instance so later ROCKS_LOG_* calls don't touch a dead logger.
// NOTE(review): the underlying file_ is intentionally NOT closed here —
// presumably the owning writable file closes it; confirm with the caller.
Status HdfsCloseHelper() {
  ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger closed %s\n",
                  file_->getName().c_str());
  if (mylog != nullptr && mylog == this) {
    mylog = nullptr;
  }
  return Status::OK();
}
protected:
file_->getName().c_str());
}
- virtual ~HdfsLogger() {
+ ~HdfsLogger() override {
if (!closed_) {
closed_ = true;
HdfsCloseHelper();
}
}
- virtual void Logv(const char* format, va_list ap) {
+ using Logger::Logv;
+ void Logv(const char* format, va_list ap) override {
const uint64_t thread_id = (*gettid_)();
// We try twice: the first time with a fixed-size stack allocated buffer,
// open a file for sequential reading
Status HdfsEnv::NewSequentialFile(const std::string& fname,
- unique_ptr<SequentialFile>* result,
- const EnvOptions& options) {
+ std::unique_ptr<SequentialFile>* result,
+ const EnvOptions& /*options*/) {
result->reset();
HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
if (f == nullptr || !f->isValid()) {
// open a file for random reading
Status HdfsEnv::NewRandomAccessFile(const std::string& fname,
- unique_ptr<RandomAccessFile>* result,
- const EnvOptions& options) {
+ std::unique_ptr<RandomAccessFile>* result,
+ const EnvOptions& /*options*/) {
result->reset();
HdfsReadableFile* f = new HdfsReadableFile(fileSys_, fname);
if (f == nullptr || !f->isValid()) {
// create a new file for writing
Status HdfsEnv::NewWritableFile(const std::string& fname,
- unique_ptr<WritableFile>* result,
- const EnvOptions& options) {
+ std::unique_ptr<WritableFile>* result,
+ const EnvOptions& /*options*/) {
result->reset();
Status s;
HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
explicit HdfsDirectory(int fd) : fd_(fd) {}
~HdfsDirectory() {}
- virtual Status Fsync() { return Status::OK(); }
+ Status Fsync() override { return Status::OK(); }
+
+ int GetFd() const { return fd_; }
private:
int fd_;
};
Status HdfsEnv::NewDirectory(const std::string& name,
- unique_ptr<Directory>* result) {
+ std::unique_ptr<Directory>* result) {
int value = hdfsExists(fileSys_, name.c_str());
switch (value) {
case HDFS_EXISTS:
pHdfsFileInfo = hdfsListDirectory(fileSys_, path.c_str(), &numEntries);
if (numEntries >= 0) {
for(int i = 0; i < numEntries; i++) {
- char* pathname = pHdfsFileInfo[i].mName;
- char* filename = std::rindex(pathname, '/');
- if (filename != nullptr) {
- result->push_back(filename+1);
+ std::string pathname(pHdfsFileInfo[i].mName);
+ size_t pos = pathname.rfind("/");
+ if (std::string::npos != pos) {
+ result->push_back(pathname.substr(pos + 1));
}
}
if (pHdfsFileInfo != nullptr) {
return IOError(src, errno);
}
-Status HdfsEnv::LockFile(const std::string& fname, FileLock** lock) {
+Status HdfsEnv::LockFile(const std::string& /*fname*/, FileLock** lock) {
// there isn's a very good way to atomically check and create
// a file via libhdfs
*lock = nullptr;
return Status::OK();
}
-Status HdfsEnv::UnlockFile(FileLock* lock) {
- return Status::OK();
-}
+Status HdfsEnv::UnlockFile(FileLock* /*lock*/) { return Status::OK(); }
Status HdfsEnv::NewLogger(const std::string& fname,
- shared_ptr<Logger>* result) {
+ std::shared_ptr<Logger>* result) {
HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
if (f == nullptr || !f->isValid()) {
delete f;
// dummy placeholders used when HDFS is not available
namespace rocksdb {
// Stub compiled in when RocksDB is built WITHOUT HDFS support: every
// entry point reports NotSupported so a misconfigured build fails loudly
// at runtime instead of silently doing nothing.
Status HdfsEnv::NewSequentialFile(const std::string& /*fname*/,
                                  std::unique_ptr<SequentialFile>* /*result*/,
                                  const EnvOptions& /*options*/) {
  return Status::NotSupported("Not compiled with hdfs support");
}
Status NewHdfsEnv(Env** /*hdfs_env*/, const std::string& /*fsname*/) {
return Status::NotSupported("Not compiled with hdfs support");