#define XATTR_SPILL_OUT_NAME "user.cephos.spill_out"
#define XATTR_NO_SPILL_OUT "0"
#define XATTR_SPILL_OUT "1"
+#define __FUNC__ __func__ << "(" << __LINE__ << ")"
//Initial features in new superblock.
static CompatSet get_fs_initial_compat_set() {
if (!((*index).index)) {
r = get_index(cid, index);
if (r < 0) {
- dout(10) << __func__ << " could not get index r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get index r = " << r << dendl;
return r;
}
} else {
if (exist)
return -EEXIST;
- dout(25) << "lfn_link path_old: " << path_old << dendl;
- dout(25) << "lfn_link path_new: " << path_new << dendl;
+ dout(25) << __FUNC__ << ": path_old: " << path_old << dendl;
+ dout(25) << __FUNC__ << ": path_new: " << path_new << dendl;
r = ::link(path_old->path(), path_new->path());
if (r < 0)
return -errno;
if (exist)
return -EEXIST;
- dout(25) << "lfn_link path_old: " << path_old << dendl;
- dout(25) << "lfn_link path_new: " << path_new << dendl;
+ dout(25) << __FUNC__ << ": path_old: " << path_old << dendl;
+ dout(25) << __FUNC__ << ": path_new: " << path_new << dendl;
r = ::link(path_old->path(), path_new->path());
if (r < 0)
return -errno;
Index index;
int r = get_index(cid, &index);
if (r < 0) {
- dout(25) << __func__ << " get_index failed " << cpp_strerror(r) << dendl;
+ dout(25) << __FUNC__ << ": get_index failed " << cpp_strerror(r) << dendl;
return r;
}
}
}
if (force_clear_omap) {
- dout(20) << __func__ << ": clearing omap on " << o
+ dout(20) << __FUNC__ << ": clearing omap on " << o
<< " in cid " << cid << dendl;
r = object_map->clear(o, &spos);
if (r < 0 && r != -ENOENT) {
- dout(25) << __func__ << " omap clear failed " << cpp_strerror(r) << dendl;
+ dout(25) << __FUNC__ << ": omap clear failed " << cpp_strerror(r) << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
}
r = index->unlink(o);
if (r < 0) {
- dout(25) << __func__ << " index unlink failed " << cpp_strerror(r) << dendl;
+ dout(25) << __FUNC__ << ": index unlink failed " << cpp_strerror(r) << dendl;
return r;
}
return 0;
m_filestore_max_inline_xattrs(0),
m_filestore_max_xattr_value_size(0)
{
- m_filestore_kill_at.set(cct->_conf->filestore_kill_at);
+ m_filestore_kill_at = cct->_conf->filestore_kill_at;
for (int i = 0; i < m_ondisk_finisher_num; ++i) {
ostringstream oss;
oss << "filestore-ondisk-" << i;
ret = ::ftruncate(fsid_fd, 0);
if (ret < 0) {
ret = -errno;
- derr << "mkfs: failed to truncate fsid: "
+ derr << __FUNC__ << ": failed to truncate fsid: "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
ret = safe_write(fsid_fd, fsid_str, strlen(fsid_str));
if (ret < 0) {
- derr << "mkfs: failed to write fsid: "
+ derr << __FUNC__ << ": failed to write fsid: "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
if (::fsync(fsid_fd) < 0) {
ret = -errno;
- derr << "mkfs: close failed: can't write fsid: "
+ derr << __FUNC__ << ": close failed: can't write fsid: "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
dout(10) << "mkfs fsid is " << fsid << dendl;
} else {
if (!fsid.is_zero() && fsid != old_fsid) {
- derr << "mkfs on-disk fsid " << old_fsid << " != provided " << fsid << dendl;
+ derr << __FUNC__ << ": on-disk fsid " << old_fsid << " != provided " << fsid << dendl;
ret = -EINVAL;
goto close_fsid_fd;
}
fsid = old_fsid;
- dout(1) << "mkfs fsid is already set to " << fsid << dendl;
+ dout(1) << __FUNC__ << ": fsid is already set to " << fsid << dendl;
}
// version stamp
ret = write_version_stamp();
if (ret < 0) {
- derr << "mkfs: write_version_stamp() failed: "
+ derr << __FUNC__ << ": write_version_stamp() failed: "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
superblock.omap_backend = cct->_conf->filestore_omap_backend;
ret = write_superblock();
if (ret < 0) {
- derr << "mkfs: write_superblock() failed: "
+ derr << __FUNC__ << ": write_superblock() failed: "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
ret = ::fstatfs(basedir_fd, &basefs);
if (ret < 0) {
ret = -errno;
- derr << "mkfs cannot fstatfs basedir "
+ derr << __FUNC__ << ": cannot fstatfs basedir "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
ret = backend->create_current();
if (ret < 0) {
- derr << "mkfs: failed to create current/ " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": failed to create current/ " << cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
int fd = read_op_seq(&initial_seq);
if (fd < 0) {
ret = fd;
- derr << "mkfs: failed to create " << current_op_seq_fn << ": "
+ derr << __FUNC__ << ": failed to create " << current_op_seq_fn << ": "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
ret = write_op_seq(fd, 1);
if (ret < 0) {
VOID_TEMP_FAILURE_RETRY(::close(fd));
- derr << "mkfs: failed to write to " << current_op_seq_fn << ": "
+ derr << __FUNC__ << ": failed to write to " << current_op_seq_fn << ": "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
VOID_TEMP_FAILURE_RETRY(::close(current_fd));
if (ret < 0 && ret != -EEXIST) {
VOID_TEMP_FAILURE_RETRY(::close(fd));
- derr << "mkfs: failed to create snap_1: " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": failed to create snap_1: " << cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
}
}
ret = KeyValueDB::test_init(superblock.omap_backend, omap_dir);
if (ret < 0) {
- derr << "mkfs failed to create " << cct->_conf->filestore_omap_backend << dendl;
+ derr << __FUNC__ << ": failed to create " << cct->_conf->filestore_omap_backend << dendl;
goto close_fsid_fd;
}
// create fsid under omap
omap_fsid_fd = ::open(omap_fsid_fn, O_RDWR|O_CREAT, 0644);
if (omap_fsid_fd < 0) {
ret = -errno;
- derr << "mkfs: failed to open " << omap_fsid_fn << ": " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": failed to open " << omap_fsid_fn << ": " << cpp_strerror(ret) << dendl;
goto close_fsid_fd;
}
ret = ::ftruncate(omap_fsid_fd, 0);
if (ret < 0) {
ret = -errno;
- derr << "mkfs: failed to truncate fsid: "
+ derr << __FUNC__ << ": failed to truncate fsid: "
<< cpp_strerror(ret) << dendl;
goto close_omap_fsid_fd;
}
ret = safe_write(omap_fsid_fd, fsid_str, strlen(fsid_str));
if (ret < 0) {
- derr << "mkfs: failed to write fsid: "
+ derr << __FUNC__ << ": failed to write fsid: "
<< cpp_strerror(ret) << dendl;
goto close_omap_fsid_fd;
}
- dout(10) << "mkfs: write success, fsid:" << fsid_str << ", ret:" << ret << dendl;
+ dout(10) << __FUNC__ << ": write success, fsid:" << fsid_str << ", ret:" << ret << dendl;
if (::fsync(omap_fsid_fd) < 0) {
ret = -errno;
- derr << "mkfs: close failed: can't write fsid: "
+ derr << __FUNC__ << ": close failed: can't write fsid: "
<< cpp_strerror(ret) << dendl;
goto close_omap_fsid_fd;
}
dout(10) << "mkfs omap fsid is " << fsid << dendl;
} else {
if (fsid != old_omap_fsid) {
- derr << "FileStore::mkfs: " << omap_fsid_fn
+ derr << __FUNC__ << ": " << omap_fsid_fn
<< " has existed omap fsid " << old_omap_fsid
<< " != expected osd fsid " << fsid
<< dendl;
ret = -EINVAL;
goto close_omap_fsid_fd;
}
- dout(1) << "FileStore::mkfs: omap fsid is already set to " << fsid << dendl;
+ dout(1) << __FUNC__ << ": omap fsid is already set to " << fsid << dendl;
}
dout(1) << cct->_conf->filestore_omap_backend << " db exists/created" << dendl;
int fd = ::open(fn, O_RDONLY, 0644);
if (fd < 0) {
int err = errno;
- derr << "FileStore::mkjournal: open error: " << cpp_strerror(err) << dendl;
+ derr << __FUNC__ << ": open error: " << cpp_strerror(err) << dendl;
return -err;
}
ret = read_fsid(fd, &fsid);
if (ret < 0) {
- derr << "FileStore::mkjournal: read error: " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": read error: " << cpp_strerror(ret) << dendl;
VOID_TEMP_FAILURE_RETRY(::close(fd));
return ret;
}
if (ret < 0) {
ret = journal->create();
if (ret)
- derr << "mkjournal error creating journal on " << journalpath
+ derr << __FUNC__ << ": error creating journal on " << journalpath
<< ": " << cpp_strerror(ret) << dendl;
else
- dout(0) << "mkjournal created journal on " << journalpath << dendl;
+ dout(0) << __FUNC__ << ": created journal on " << journalpath << dendl;
}
delete journal;
journal = 0;
int r = ::fcntl(fsid_fd, F_SETLK, &l);
if (r < 0) {
int err = errno;
- dout(0) << "lock_fsid failed to lock " << basedir << "/fsid, is another ceph-osd still running? "
+ dout(0) << __FUNC__ << ": failed to lock " << basedir << "/fsid, is another ceph-osd still running? "
<< cpp_strerror(err) << dendl;
return -err;
}
bool FileStore::test_mount_in_use()
{
- dout(5) << "test_mount basedir " << basedir << " journal " << journalpath << dendl;
+ dout(5) << __FUNC__ << ": basedir " << basedir << " journal " << journalpath << dendl;
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/fsid", basedir.c_str());
return inuse;
}
+// Report whether the underlying device is rotational (spinning disk) vs SSD.
+// If a backend is already attached, delegate to it; otherwise probe the
+// filesystem under basedir via a temporary backend.  On any probe failure
+// (open/fstatfs), conservatively report rotational (true).
+bool FileStore::is_rotational()
+{
+  bool rotational;
+  if (backend) {
+    rotational = backend->is_rotational();
+  } else {
+    int fd = ::open(basedir.c_str(), O_RDONLY);
+    if (fd < 0)
+      return true;
+    struct statfs st;
+    int r = ::fstatfs(fd, &st);
+    ::close(fd);
+    if (r < 0) {
+      return true;
+    }
+    // create a throwaway backend just to query the fs type, then drop it
+    // so a later mount() can construct the real one.
+    create_backend(st.f_type);
+    rotational = backend->is_rotational();
+    delete backend;
+    backend = NULL;
+  }
+  dout(10) << __FUNC__ << ": " << (int)rotational << dendl;
+  return rotational;
+}
+
int FileStore::_detect_fs()
{
struct statfs st;
r = backend->detect_features();
if (r < 0) {
- derr << "_detect_fs: detect_features error: " << cpp_strerror(r) << dendl;
+ derr << __FUNC__ << ": detect_features error: " << cpp_strerror(r) << dendl;
return r;
}
int tmpfd = ::open(fn, O_CREAT|O_WRONLY|O_TRUNC, 0700);
if (tmpfd < 0) {
int ret = -errno;
- derr << "_detect_fs unable to create " << fn << ": " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": unable to create " << fn << ": " << cpp_strerror(ret) << dendl;
return ret;
}
bl.push_back(std::move(bp));
bufferlist::iterator i = bl.begin();
::decode(*version, i);
- dout(10) << __func__ << " was " << *version << " vs target "
+ dout(10) << __FUNC__ << ": was " << *version << " vs target "
<< target_version << dendl;
if (*version == target_version)
return 1;
int FileStore::write_version_stamp()
{
- dout(1) << __func__ << " " << target_version << dendl;
+ dout(1) << __FUNC__ << ": " << target_version << dendl;
bufferlist bl;
::encode(target_version, bl);
int FileStore::upgrade()
{
- dout(1) << "upgrade" << dendl;
+ dout(1) << __FUNC__ << dendl;
uint32_t version;
int r = version_stamp_is_valid(&version);
memset(s, 0, sizeof(s));
int ret = safe_read(op_fd, s, sizeof(s) - 1);
if (ret < 0) {
- derr << "error reading " << current_op_seq_fn << ": " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": error reading " << current_op_seq_fn << ": " << cpp_strerror(ret) << dendl;
VOID_TEMP_FAILURE_RETRY(::close(op_fd));
assert(!m_filestore_fail_eio || ret != -EIO);
return ret;
// make sure global base dir exists
if (::access(basedir.c_str(), R_OK | W_OK)) {
ret = -errno;
- derr << "FileStore::mount: unable to access basedir '" << basedir << "': "
+ derr << __FUNC__ << ": unable to access basedir '" << basedir << "': "
<< cpp_strerror(ret) << dendl;
goto done;
}
fsid_fd = ::open(buf, O_RDWR, 0644);
if (fsid_fd < 0) {
ret = -errno;
- derr << "FileStore::mount: error opening '" << buf << "': "
+ derr << __FUNC__ << ": error opening '" << buf << "': "
<< cpp_strerror(ret) << dendl;
goto done;
}
ret = read_fsid(fsid_fd, &fsid);
if (ret < 0) {
- derr << "FileStore::mount: error reading fsid_fd: " << cpp_strerror(ret)
+ derr << __FUNC__ << ": error reading fsid_fd: " << cpp_strerror(ret)
<< dendl;
goto close_fsid_fd;
}
if (lock_fsid() < 0) {
- derr << "FileStore::mount: lock_fsid failed" << dendl;
+ derr << __FUNC__ << ": lock_fsid failed" << dendl;
ret = -EBUSY;
goto close_fsid_fd;
}
uint32_t version_stamp;
ret = version_stamp_is_valid(&version_stamp);
if (ret < 0) {
- derr << "FileStore::mount: error in version_stamp_is_valid: "
+ derr << __FUNC__ << ": error in version_stamp_is_valid: "
<< cpp_strerror(ret) << dendl;
goto close_fsid_fd;
} else if (ret == 0) {
if (do_update || (int)version_stamp < cct->_conf->filestore_update_to) {
- derr << "FileStore::mount: stale version stamp detected: "
+ derr << __FUNC__ << ": stale version stamp detected: "
<< version_stamp
<< ". Proceeding, do_update "
<< "is set, performing disk format upgrade."
do_update = true;
} else {
ret = -EINVAL;
- derr << "FileStore::mount: stale version stamp " << version_stamp
+ derr << __FUNC__ << ": stale version stamp " << version_stamp
<< ". Please run the FileStore update script before starting the "
<< "OSD, or set filestore_update_to to " << target_version
<< " (currently " << cct->_conf->filestore_update_to << ")"
// Check if this FileStore supports all the necessary features to mount
if (supported_compat_set.compare(superblock.compat_features) == -1) {
- derr << "FileStore::mount: Incompatible features set "
+ derr << __FUNC__ << ": Incompatible features set "
<< superblock.compat_features << dendl;
ret = -EINVAL;
goto close_fsid_fd;
basedir_fd = ::open(basedir.c_str(), O_RDONLY);
if (basedir_fd < 0) {
ret = -errno;
- derr << "FileStore::mount: failed to open " << basedir << ": "
+ derr << __FUNC__ << ": failed to open " << basedir << ": "
<< cpp_strerror(ret) << dendl;
basedir_fd = -1;
goto close_fsid_fd;
// test for btrfs, xattrs, etc.
ret = _detect_fs();
if (ret < 0) {
- derr << "FileStore::mount: error in _detect_fs: "
+ derr << __FUNC__ << ": error in _detect_fs: "
<< cpp_strerror(ret) << dendl;
goto close_basedir_fd;
}
list<string> ls;
ret = backend->list_checkpoints(ls);
if (ret < 0) {
- derr << "FileStore::mount: error in _list_snaps: "<< cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": error in _list_snaps: "<< cpp_strerror(ret) << dendl;
goto close_basedir_fd;
}
if (backend->can_checkpoint()) {
if (snaps.empty()) {
- dout(0) << "mount WARNING: no consistent snaps found, store may be in inconsistent state" << dendl;
+ dout(0) << __FUNC__ << ": WARNING: no consistent snaps found, store may be in inconsistent state" << dendl;
} else {
char s[NAME_MAX];
uint64_t curr_seq = 0;
<< TEXT_NORMAL << std::endl;
}
- dout(10) << "mount rolling back to consistent snap " << cp << dendl;
+ dout(10) << __FUNC__ << ": rolling back to consistent snap " << cp << dendl;
snprintf(s, sizeof(s), COMMIT_SNAP_ITEM, (long long unsigned)cp);
}
// drop current?
ret = backend->rollback_to(s);
if (ret) {
- derr << "FileStore::mount: error rolling back to " << s << ": "
+ derr << __FUNC__ << ": error rolling back to " << s << ": "
<< cpp_strerror(ret) << dendl;
goto close_basedir_fd;
}
current_fd = ::open(current_fn.c_str(), O_RDONLY);
if (current_fd < 0) {
ret = -errno;
- derr << "FileStore::mount: error opening: " << current_fn << ": " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": error opening: " << current_fn << ": " << cpp_strerror(ret) << dendl;
goto close_basedir_fd;
}
op_fd = read_op_seq(&initial_op_seq);
if (op_fd < 0) {
ret = op_fd;
- derr << "FileStore::mount: read_op_seq failed" << dendl;
+ derr << __FUNC__ << ": read_op_seq failed" << dendl;
goto close_current_fd;
}
int r = ::creat(nosnapfn, 0644);
if (r < 0) {
ret = -errno;
- derr << "FileStore::mount: failed to create current/nosnap" << dendl;
+ derr << __FUNC__ << ": failed to create current/nosnap" << dendl;
goto close_current_fd;
}
VOID_TEMP_FAILURE_RETRY(::close(r));
snprintf(omap_fsid_buf, sizeof(omap_fsid_buf), "%s/osd_uuid", omap_dir.c_str());
// if osd_uuid not exists, assume as this omap matchs corresponding osd
if (::stat(omap_fsid_buf, &omap_fsid_stat) != 0){
- dout(10) << "Filestore::mount osd_uuid not found under omap, "
+ dout(10) << __FUNC__ << ": osd_uuid not found under omap, "
<< "assume as matched."
<< dendl;
}else{
omap_fsid_fd = ::open(omap_fsid_buf, O_RDONLY, 0644);
if (omap_fsid_fd < 0) {
ret = -errno;
- derr << "FileStore::mount: error opening '" << omap_fsid_buf << "': "
+ derr << __FUNC__ << ": error opening '" << omap_fsid_buf << "': "
<< cpp_strerror(ret)
<< dendl;
goto close_current_fd;
VOID_TEMP_FAILURE_RETRY(::close(omap_fsid_fd));
omap_fsid_fd = -1; // defensive
if (ret < 0) {
- derr << "FileStore::mount: error reading omap_fsid_fd"
+ derr << __FUNC__ << ": error reading omap_fsid_fd"
<< ", omap_fsid = " << omap_fsid
<< cpp_strerror(ret)
<< dendl;
goto close_current_fd;
}
if (fsid != omap_fsid) {
- derr << "FileStore::mount: " << omap_fsid_buf
+ derr << __FUNC__ << ": " << omap_fsid_buf
<< " has existed omap fsid " << omap_fsid
<< " != expected osd fsid " << fsid
<< dendl;
omap_dir);
if (omap_store == NULL)
{
- derr << "Error creating " << superblock.omap_backend << dendl;
+ derr << __FUNC__ << ": Error creating " << superblock.omap_backend << dendl;
ret = -1;
goto close_current_fd;
}
ret = omap_store->init();
if (ret < 0) {
- derr << "Error initializing omap_store: " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": Error initializing omap_store: " << cpp_strerror(ret) << dendl;
goto close_current_fd;
}
stringstream err;
if (omap_store->create_and_open(err)) {
delete omap_store;
- derr << "Error initializing " << superblock.omap_backend
+ derr << __FUNC__ << ": Error initializing " << superblock.omap_backend
<< " : " << err.str() << dendl;
ret = -1;
goto close_current_fd;
ret = dbomap->init(do_update);
if (ret < 0) {
delete dbomap;
- derr << "Error initializing DBObjectMap: " << ret << dendl;
+ derr << __FUNC__ << ": Error initializing DBObjectMap: " << ret << dendl;
goto close_current_fd;
}
stringstream err2;
!m_filestore_journal_trailing) {
if (!backend->can_checkpoint()) {
m_filestore_journal_writeahead = true;
- dout(0) << "mount: enabling WRITEAHEAD journal mode: checkpoint is not enabled" << dendl;
+ dout(0) << __FUNC__ << ": enabling WRITEAHEAD journal mode: checkpoint is not enabled" << dendl;
} else {
m_filestore_journal_parallel = true;
- dout(0) << "mount: enabling PARALLEL journal mode: fs, checkpoint is enabled" << dendl;
+ dout(0) << __FUNC__ << ": enabling PARALLEL journal mode: fs, checkpoint is enabled" << dendl;
}
} else {
if (m_filestore_journal_writeahead)
- dout(0) << "mount: WRITEAHEAD journal mode explicitly enabled in conf" << dendl;
+ dout(0) << __FUNC__ << ": WRITEAHEAD journal mode explicitly enabled in conf" << dendl;
if (m_filestore_journal_parallel)
- dout(0) << "mount: PARALLEL journal mode explicitly enabled in conf" << dendl;
+ dout(0) << __FUNC__ << ": PARALLEL journal mode explicitly enabled in conf" << dendl;
if (m_filestore_journal_trailing)
- dout(0) << "mount: TRAILING journal mode explicitly enabled in conf" << dendl;
+ dout(0) << __FUNC__ << ": TRAILING journal mode explicitly enabled in conf" << dendl;
}
if (m_filestore_journal_writeahead)
journal->set_wait_on_full(true);
} else {
- dout(0) << "mount: no journal" << dendl;
+ dout(0) << __FUNC__ << ": no journal" << dendl;
}
ret = _sanity_check_fs();
if (ret) {
- derr << "FileStore::mount: _sanity_check_fs failed with error "
+ derr << __FUNC__ << ": _sanity_check_fs failed with error "
<< ret << dendl;
goto close_current_fd;
}
if (!m_disable_wbthrottle) {
wbthrottle.start();
} else {
- dout(0) << "mount INFO: WbThrottle is disabled" << dendl;
+ dout(0) << __FUNC__ << ": INFO: WbThrottle is disabled" << dendl;
if (cct->_conf->filestore_odsync_write) {
- dout(0) << "mount INFO: O_DSYNC write is enabled" << dendl;
+ dout(0) << __FUNC__ << ": INFO: O_DSYNC write is enabled" << dendl;
}
}
sync_thread.create("filestore_sync");
if (!(generic_flags & SKIP_JOURNAL_REPLAY)) {
ret = journal_replay(initial_op_seq);
if (ret < 0) {
- derr << "mount failed to open journal " << journalpath << ": " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": failed to open journal " << journalpath << ": " << cpp_strerror(ret) << dendl;
if (ret == -ENOTTY) {
derr << "maybe journal is not pointing to a block device and its size "
<< "wasn't configured?" << dendl;
void FileStore::init_temp_collections()
{
- dout(10) << __func__ << dendl;
+ dout(10) << __FUNC__ << dendl;
vector<coll_t> ls;
int r = list_collections(ls, true);
assert(r >= 0);
if (temps.count(temp)) {
temps.erase(temp);
} else {
- dout(10) << __func__ << " creating " << temp << dendl;
+ dout(10) << __FUNC__ << ": creating " << temp << dendl;
r = _create_collection(temp, 0, spos);
assert(r == 0);
}
}
for (set<coll_t>::iterator p = temps.begin(); p != temps.end(); ++p) {
- dout(10) << __func__ << " removing stray " << *p << dendl;
+ dout(10) << __FUNC__ << ": removing stray " << *p << dendl;
r = _collection_remove_recursive(*p, spos);
assert(r == 0);
}
int FileStore::umount()
{
- dout(5) << "umount " << basedir << dendl;
+ dout(5) << __FUNC__ << ": " << basedir << dendl;
flush();
sync();
logger->inc(l_filestore_ops);
logger->inc(l_filestore_bytes, o->bytes);
- dout(5) << "queue_op " << o << " seq " << o->op
+ dout(5) << __FUNC__ << ": " << o << " seq " << o->op
<< " " << *osr
<< " " << o->bytes << " bytes"
<< " (queue has " << throttle_ops.get_current() << " ops and " << throttle_bytes.get_current() << " bytes)"
// inject a stall?
if (cct->_conf->filestore_inject_stall) {
int orig = cct->_conf->filestore_inject_stall;
- dout(5) << "_do_op filestore_inject_stall " << orig << ", sleeping" << dendl;
+ dout(5) << __FUNC__ << ": filestore_inject_stall " << orig << ", sleeping" << dendl;
sleep(orig);
cct->_conf->set_val("filestore_inject_stall", "0");
- dout(5) << "_do_op done stalling" << dendl;
+ dout(5) << __FUNC__ << ": done stalling" << dendl;
}
osr->apply_lock.Lock();
Op *o = osr->peek_queue();
o->trace.event("op_apply_start");
apply_manager.op_apply_start(o->op);
- dout(5) << "_do_op " << o << " seq " << o->op << " " << *osr << "/" << osr->parent << " start" << dendl;
+ dout(5) << __FUNC__ << ": " << o << " seq " << o->op << " " << *osr << "/" << osr->parent << " start" << dendl;
o->trace.event("_do_transactions start");
int r = _do_transactions(o->tls, o->op, &handle);
o->trace.event("op_apply_finish");
apply_manager.op_apply_finish(o->op);
- dout(10) << "_do_op " << o << " seq " << o->op << " r = " << r
+ dout(10) << __FUNC__ << ": " << o << " seq " << o->op << " r = " << r
<< ", finisher " << o->onreadable << " " << o->onreadable_sync << dendl;
o->tls.clear();
utime_t lat = ceph_clock_now();
lat -= o->start;
- dout(10) << "_finish_op " << o << " seq " << o->op << " " << *osr << "/" << osr->parent << " lat " << lat << dendl;
+ dout(10) << __FUNC__ << ": " << o << " seq " << o->op << " " << *osr << "/" << osr->parent << " lat " << lat << dendl;
osr->apply_lock.Unlock(); // locked in _do_op
o->trace.event("_finish_op");
tls, &onreadable, &ondisk, &onreadable_sync);
if (cct->_conf->objectstore_blackhole) {
- dout(0) << __func__ << " objectstore_blackhole = TRUE, dropping transaction"
+ dout(0) << __FUNC__ << ": objectstore_blackhole = TRUE, dropping transaction"
<< dendl;
delete ondisk;
delete onreadable;
assert(posr);
if (posr->p) {
osr = static_cast<OpSequencer *>(posr->p.get());
- dout(5) << "queue_transactions existing " << osr << " " << *osr << dendl;
+ dout(5) << __FUNC__ << ": existing " << osr << " " << *osr << dendl;
} else {
- osr = new OpSequencer(cct, next_osr_id.inc());
+ osr = new OpSequencer(cct, ++next_osr_id);
osr->set_cct(cct);
osr->parent = posr;
posr->p = osr;
- dout(5) << "queue_transactions new " << osr << " " << *osr << dendl;
+ dout(5) << __FUNC__ << ": new " << osr << " " << *osr << dendl;
}
// used to include osr information in tracepoints during transaction apply
dump_transactions(o->tls, o->op, osr);
if (m_filestore_journal_parallel) {
- dout(5) << "queue_transactions (parallel) " << o->op << " " << o->tls << dendl;
+ dout(5) << __FUNC__ << ": (parallel) " << o->op << " " << o->tls << dendl;
trace.keyval("journal mode", "parallel");
trace.event("journal started");
queue_op(osr, o);
trace.event("op queued");
} else if (m_filestore_journal_writeahead) {
- dout(5) << "queue_transactions (writeahead) " << o->op << " " << o->tls << dendl;
+ dout(5) << __FUNC__ << ": (writeahead) " << o->op << " " << o->tls << dendl;
osr->queue_journal(o->op);
if (!journal) {
Op *o = build_op(tls, onreadable, onreadable_sync, osd_op);
- dout(5) << __func__ << " (no journal) " << o << " " << tls << dendl;
+ dout(5) << __FUNC__ << ": (no journal) " << o << " " << tls << dendl;
if (handle)
handle->suspend_tp_timeout();
orig_len = journal->prepare_entry(tls, &tbl);
}
uint64_t op = submit_manager.op_submit_start();
- dout(5) << "queue_transactions (trailing journal) " << op << " " << tls << dendl;
+ dout(5) << __FUNC__ << ": (trailing journal) " << op << " " << tls << dendl;
if (m_filestore_do_dump)
dump_transactions(tls, op, osr);
void FileStore::_journaled_ahead(OpSequencer *osr, Op *o, Context *ondisk)
{
- dout(5) << "_journaled_ahead " << o << " seq " << o->op << " " << *osr << " " << o->tls << dendl;
+ dout(5) << __FUNC__ << ": " << o << " seq " << o->op << " " << *osr << " " << o->tls << dendl;
o->trace.event("writeahead journal finished");
// sync all previous operations on this sequencer
int ret = object_map->sync();
if (ret < 0) {
- derr << __func__ << " : omap sync error " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": omap sync error " << cpp_strerror(ret) << dendl;
assert(0 == "_set_global_replay_guard failed");
}
ret = sync_filesystem(basedir_fd);
if (ret < 0) {
- derr << __func__ << " : sync_filesystem error " << cpp_strerror(ret) << dendl;
+ derr << __FUNC__ << ": sync_filesystem error " << cpp_strerror(ret) << dendl;
assert(0 == "_set_global_replay_guard failed");
}
int fd = ::open(fn, O_RDONLY);
if (fd < 0) {
int err = errno;
- derr << __func__ << ": " << cid << " error " << cpp_strerror(err) << dendl;
+ derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl;
assert(0 == "_set_global_replay_guard failed");
}
int r = chain_fsetxattr<true, true>(
fd, GLOBAL_REPLAY_GUARD_XATTR, v.c_str(), v.length());
if (r < 0) {
- derr << __func__ << ": fsetxattr " << GLOBAL_REPLAY_GUARD_XATTR
+ derr << __FUNC__ << ": fsetxattr " << GLOBAL_REPLAY_GUARD_XATTR
<< " got " << cpp_strerror(r) << dendl;
assert(0 == "fsetxattr failed");
}
_inject_failure();
VOID_TEMP_FAILURE_RETRY(::close(fd));
- dout(10) << __func__ << ": " << spos << " done" << dendl;
+ dout(10) << __FUNC__ << ": " << spos << " done" << dendl;
}
int FileStore::_check_global_replay_guard(const coll_t& cid,
get_cdir(cid, fn, sizeof(fn));
int fd = ::open(fn, O_RDONLY);
if (fd < 0) {
- dout(10) << __func__ << ": " << cid << " dne" << dendl;
+ dout(10) << __FUNC__ << ": " << cid << " dne" << dendl;
return 1; // if collection does not exist, there is no guard, and we can replay.
}
char buf[100];
int r = chain_fgetxattr(fd, GLOBAL_REPLAY_GUARD_XATTR, buf, sizeof(buf));
if (r < 0) {
- dout(20) << __func__ << " no xattr" << dendl;
+ dout(20) << __FUNC__ << ": no xattr" << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
VOID_TEMP_FAILURE_RETRY(::close(fd));
return 1; // no xattr
int fd = ::open(fn, O_RDONLY);
if (fd < 0) {
int err = errno;
- derr << "_set_replay_guard " << cid << " error " << cpp_strerror(err) << dendl;
+ derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl;
assert(0 == "_set_replay_guard failed");
}
_set_replay_guard(fd, spos, 0, in_progress);
if (backend->can_checkpoint())
return;
- dout(10) << "_set_replay_guard " << spos << (in_progress ? " START" : "") << dendl;
+ dout(10) << __FUNC__ << ": " << spos << (in_progress ? " START" : "") << dendl;
_inject_failure();
_inject_failure();
- dout(10) << "_set_replay_guard " << spos << " done" << dendl;
+ dout(10) << __FUNC__ << ": " << spos << " done" << dendl;
}
void FileStore::_close_replay_guard(const coll_t& cid,
int fd = ::open(fn, O_RDONLY);
if (fd < 0) {
int err = errno;
- derr << "_close_replay_guard " << cid << " error " << cpp_strerror(err) << dendl;
+ derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl;
assert(0 == "_close_replay_guard failed");
}
_close_replay_guard(fd, spos);
if (backend->can_checkpoint())
return;
- dout(10) << "_close_replay_guard " << spos << dendl;
+ dout(10) << __FUNC__ << ": " << spos << dendl;
_inject_failure();
_inject_failure();
- dout(10) << "_close_replay_guard " << spos << " done" << dendl;
+ dout(10) << __FUNC__ << ": " << spos << " done" << dendl;
}
int FileStore::_check_replay_guard(const coll_t& cid, const ghobject_t &oid,
FDRef fd;
r = lfn_open(cid, oid, false, &fd);
if (r < 0) {
- dout(10) << "_check_replay_guard " << cid << " " << oid << " dne" << dendl;
+ dout(10) << __FUNC__ << ": " << cid << " " << oid << " dne" << dendl;
return 1; // if file does not exist, there is no guard, and we can replay.
}
int ret = _check_replay_guard(**fd, spos);
get_cdir(cid, fn, sizeof(fn));
int fd = ::open(fn, O_RDONLY);
if (fd < 0) {
- dout(10) << "_check_replay_guard " << cid << " dne" << dendl;
+ dout(10) << __FUNC__ << ": " << cid << " dne" << dendl;
return 1; // if collection does not exist, there is no guard, and we can replay.
}
int ret = _check_replay_guard(fd, spos);
char buf[100];
int r = chain_fgetxattr(fd, REPLAY_GUARD_XATTR, buf, sizeof(buf));
if (r < 0) {
- dout(20) << "_check_replay_guard no xattr" << dendl;
+ dout(20) << __FUNC__ << ": no xattr" << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
return 1; // no xattr
}
if (!p.end()) // older journals don't have this
::decode(in_progress, p);
if (opos > spos) {
- dout(10) << "_check_replay_guard object has " << opos << " > current pos " << spos
+ dout(10) << __FUNC__ << ": object has " << opos << " > current pos " << spos
<< ", now or in future, SKIPPING REPLAY" << dendl;
return -1;
} else if (opos == spos) {
if (in_progress) {
- dout(10) << "_check_replay_guard object has " << opos << " == current pos " << spos
+ dout(10) << __FUNC__ << ": object has " << opos << " == current pos " << spos
<< ", in_progress=true, CONDITIONAL REPLAY" << dendl;
return 0;
} else {
- dout(10) << "_check_replay_guard object has " << opos << " == current pos " << spos
+ dout(10) << __FUNC__ << ": object has " << opos << " == current pos " << spos
<< ", in_progress=false, SKIPPING REPLAY" << dendl;
return -1;
}
} else {
- dout(10) << "_check_replay_guard object has " << opos << " < current pos " << spos
+ dout(10) << __FUNC__ << ": object has " << opos << " < current pos " << spos
<< ", in past, will replay" << dendl;
return 1;
}
Transaction& t, uint64_t op_seq, int trans_num,
ThreadPool::TPHandle *handle)
{
- dout(10) << "_do_transaction on " << &t << dendl;
+ dout(10) << __FUNC__ << ": on " << &t << dendl;
#ifdef WITH_LTTNG
const char *osr_name = t.get_osr() ? static_cast<OpSequencer*>(t.get_osr())->get_name().c_str() : "<NULL>";
int r = lfn_stat(cid, oid, st);
assert(allow_eio || !m_filestore_fail_eio || r != -EIO);
if (r < 0) {
- dout(10) << "stat " << cid << "/" << oid
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid
<< " = " << r << dendl;
} else {
- dout(10) << "stat " << cid << "/" << oid
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid
<< " = " << r
<< " (size " << st->st_size << ")" << dendl;
}
tracepoint(objectstore, read_enter, _cid.c_str(), offset, len);
const coll_t& cid = !_need_temp_object_collection(_cid, oid) ? _cid : _cid.get_temp();
- dout(15) << "read " << cid << "/" << oid << " " << offset << "~" << len << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << dendl;
FDRef fd;
int r = lfn_open(cid, oid, false, &fd);
if (r < 0) {
- dout(10) << "FileStore::read(" << cid << "/" << oid << ") open error: "
+ dout(10) << __FUNC__ << ": (" << cid << "/" << oid << ") open error: "
<< cpp_strerror(r) << dendl;
return r;
}
bufferptr bptr(len); // prealloc space for entire read
got = safe_pread(**fd, bptr.c_str(), len, offset);
if (got < 0) {
- dout(10) << "FileStore::read(" << cid << "/" << oid << ") pread error: " << cpp_strerror(got) << dendl;
+ dout(10) << __FUNC__ << ": (" << cid << "/" << oid << ") pread error: " << cpp_strerror(got) << dendl;
lfn_close(fd);
if (!(allow_eio || !m_filestore_fail_eio || got != -EIO)) {
- derr << "FileStore::read(" << cid << "/" << oid << ") pread error: " << cpp_strerror(got) << dendl;
+ derr << __FUNC__ << ": (" << cid << "/" << oid << ") pread error: " << cpp_strerror(got) << dendl;
assert(0 == "eio on pread");
}
return got;
ostringstream ss;
int errors = backend->_crc_verify_read(**fd, offset, got, bl, &ss);
if (errors != 0) {
- dout(0) << "FileStore::read " << cid << "/" << oid << " " << offset << "~"
+ dout(0) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~"
<< got << " ... BAD CRC:\n" << ss.str() << dendl;
assert(0 == "bad crc on read");
}
lfn_close(fd);
- dout(10) << "FileStore::read " << cid << "/" << oid << " " << offset << "~"
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~"
<< got << "/" << len << dendl;
if (cct->_conf->filestore_debug_inject_read_err &&
debug_data_eio(oid)) {
while (i < fiemap->fm_mapped_extents) {
struct fiemap_extent *next = extent + 1;
- dout(10) << "FileStore::fiemap() fm_mapped_extents=" << fiemap->fm_mapped_extents
+ dout(10) << __FUNC__ << ": fm_mapped_extents=" << fiemap->fm_mapped_extents
<< " fe_logical=" << extent->fe_logical << " fe_length=" << extent->fe_length << dendl;
/* try to merge extents */
return 0;
}
- dout(15) << "fiemap " << cid << "/" << oid << " " << offset << "~" << len << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << dendl;
FDRef fd;
done:
- dout(10) << "fiemap " << cid << "/" << oid << " " << offset << "~" << len << " = " << r << " num_extents=" << destmap.size() << " " << destmap << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << " = " << r << " num_extents=" << destmap.size() << " " << destmap << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
tracepoint(objectstore, fiemap_exit, r);
return r;
int FileStore::_remove(const coll_t& cid, const ghobject_t& oid,
const SequencerPosition &spos)
{
- dout(15) << "remove " << cid << "/" << oid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << dendl;
int r = lfn_unlink(cid, oid, spos);
- dout(10) << "remove " << cid << "/" << oid << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " = " << r << dendl;
return r;
}
int FileStore::_truncate(const coll_t& cid, const ghobject_t& oid, uint64_t size)
{
- dout(15) << "truncate " << cid << "/" << oid << " size " << size << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " size " << size << dendl;
int r = lfn_truncate(cid, oid, size);
- dout(10) << "truncate " << cid << "/" << oid << " size " << size << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " size " << size << " = " << r << dendl;
return r;
}
int FileStore::_touch(const coll_t& cid, const ghobject_t& oid)
{
- dout(15) << "touch " << cid << "/" << oid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << dendl;
FDRef fd;
int r = lfn_open(cid, oid, true, &fd);
} else {
lfn_close(fd);
}
- dout(10) << "touch " << cid << "/" << oid << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " = " << r << dendl;
return r;
}
uint64_t offset, size_t len,
const bufferlist& bl, uint32_t fadvise_flags)
{
- dout(15) << "write " << cid << "/" << oid << " " << offset << "~" << len << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << dendl;
int r;
FDRef fd;
r = lfn_open(cid, oid, true, &fd);
if (r < 0) {
- dout(0) << "write couldn't open " << cid << "/"
+ dout(0) << __FUNC__ << ": couldn't open " << cid << "/"
<< oid << ": "
<< cpp_strerror(r) << dendl;
goto out;
// write
r = bl.write_fd(**fd, offset);
if (r < 0) {
- derr << __func__ << " write_fd on " << cid << "/" << oid
+ derr << __FUNC__ << ": write_fd on " << cid << "/" << oid
<< " error: " << cpp_strerror(r) << dendl;
lfn_close(fd);
goto out;
lfn_close(fd);
out:
- dout(10) << "write " << cid << "/" << oid << " " << offset << "~" << len << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << " = " << r << dendl;
return r;
}
int FileStore::_zero(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len)
{
- dout(15) << "zero " << cid << "/" << oid << " " << offset << "~" << len << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << dendl;
int ret = 0;
if (cct->_conf->filestore_punch_hole) {
// lame, kernel is old and doesn't support it.
// write zeros.. yuck!
- dout(20) << "zero falling back to writing zeros" << dendl;
+ dout(20) << __FUNC__ << ": falling back to writing zeros" << dendl;
{
bufferlist bl;
bl.append_zero(len);
# endif
# endif
#endif
- dout(20) << "zero " << cid << "/" << oid << " " << offset << "~" << len << " = " << ret << dendl;
+ dout(20) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << " = " << ret << dendl;
return ret;
}
int FileStore::_clone(const coll_t& cid, const ghobject_t& oldoid, const ghobject_t& newoid,
const SequencerPosition& spos)
{
- dout(15) << "clone " << cid << "/" << oldoid << " -> " << cid << "/" << newoid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oldoid << " -> " << cid << "/" << newoid << dendl;
if (_check_replay_guard(cid, newoid, spos) < 0)
return 0;
out:
lfn_close(o);
out2:
- dout(10) << "clone " << cid << "/" << oldoid << " -> " << cid << "/" << newoid << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oldoid << " -> " << cid << "/" << newoid << " = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
int FileStore::_do_clone_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff)
{
- dout(20) << "_do_clone_range copy " << srcoff << "~" << len << " to " << dstoff << dendl;
+ dout(20) << __FUNC__ << ": copy " << srcoff << "~" << len << " to " << dstoff << dendl;
return backend->clone_range(from, to, srcoff, len, dstoff);
}
int FileStore::_do_sparse_copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff)
{
- dout(20) << __func__ << " " << srcoff << "~" << len << " to " << dstoff << dendl;
+ dout(20) << __FUNC__ << ": " << srcoff << "~" << len << " to " << dstoff << dendl;
int r = 0;
map<uint64_t, uint64_t> exomap;
// fiemap doesn't allow zero length
uint64_t it_off = miter->first - srcoff + dstoff;
r = _do_copy_range(from, to, miter->first, miter->second, it_off, true);
if (r < 0) {
- derr << "FileStore::_do_copy_range: copy error at " << miter->first << "~" << miter->second
+ derr << __FUNC__ << ": copy error at " << miter->first << "~" << miter->second
<< " to " << it_off << ", " << cpp_strerror(r) << dendl;
break;
}
r = ::fstat(to, &st);
if (r < 0) {
r = -errno;
- derr << __func__ << ": fstat error at " << to << " " << cpp_strerror(r) << dendl;
+ derr << __FUNC__ << ": fstat error at " << to << " " << cpp_strerror(r) << dendl;
goto out;
}
if (st.st_size < (int)(dstoff + len)) {
r = ::ftruncate(to, dstoff + len);
if (r < 0) {
r = -errno;
- derr << __func__ << ": ftruncate error at " << dstoff+len << " " << cpp_strerror(r) << dendl;
+ derr << __FUNC__ << ": ftruncate error at " << dstoff+len << " " << cpp_strerror(r) << dendl;
goto out;
}
}
}
out:
- dout(20) << __func__ << " " << srcoff << "~" << len << " to " << dstoff << " = " << r << dendl;
+ dout(20) << __FUNC__ << ": " << srcoff << "~" << len << " to " << dstoff << " = " << r << dendl;
return r;
}
int FileStore::_do_copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff, bool skip_sloppycrc)
{
- dout(20) << "_do_copy_range " << srcoff << "~" << len << " to " << dstoff << dendl;
+ dout(20) << __FUNC__ << ": " << srcoff << "~" << len << " to " << dstoff << dendl;
int r = 0;
loff_t pos = srcoff;
loff_t end = srcoff + len;
r = safe_splice(from, &pos, pipefd[1], NULL, l, SPLICE_F_NONBLOCK);
dout(10) << " safe_splice read from " << pos << "~" << l << " got " << r << dendl;
if (r < 0) {
- derr << "FileStore::_do_copy_range: safe_splice read error at " << pos << "~" << len
+ derr << __FUNC__ << ": safe_splice read error at " << pos << "~" << len
<< ", " << cpp_strerror(r) << dendl;
break;
}
if (r == 0) {
// hrm, bad source range, wtf.
r = -ERANGE;
- derr << "FileStore::_do_copy_range got short read result at " << pos
+ derr << __FUNC__ << ": got short read result at " << pos
<< " of fd " << from << " len " << len << dendl;
break;
}
dout(10) << " safe_splice write to " << to << " len " << r
<< " got " << r << dendl;
if (r < 0) {
- derr << "FileStore::_do_copy_range: write error at " << pos << "~"
+ derr << __FUNC__ << ": write error at " << pos << "~"
<< r << ", " << cpp_strerror(r) << dendl;
break;
}
continue;
} else {
r = -errno;
- derr << "FileStore::_do_copy_range: read error at " << pos << "~" << len
+ derr << __FUNC__ << ": read error at " << pos << "~" << len
<< ", " << cpp_strerror(r) << dendl;
break;
}
if (r == 0) {
// hrm, bad source range, wtf.
r = -ERANGE;
- derr << "FileStore::_do_copy_range got short read result at " << pos
+ derr << __FUNC__ << ": got short read result at " << pos
<< " of fd " << from << " len " << len << dendl;
break;
}
<< " got " << r2 << dendl;
if (r2 < 0) {
r = r2;
- derr << "FileStore::_do_copy_range: write error at " << pos << "~"
+ derr << __FUNC__ << ": write error at " << pos << "~"
<< r-op << ", " << cpp_strerror(r) << dendl;
break;
if (r < 0 && replaying) {
assert(r == -ERANGE);
- derr << "Filestore: short source tolerated because we are replaying" << dendl;
+ derr << __FUNC__ << ": short source tolerated because we are replaying" << dendl;
r = pos - from;;
}
assert(replaying || pos == end);
int rc = backend->_crc_update_clone_range(from, to, srcoff, len, dstoff);
assert(rc >= 0);
}
- dout(20) << "_do_copy_range " << srcoff << "~" << len << " to " << dstoff << " = " << r << dendl;
+ dout(20) << __FUNC__ << ": " << srcoff << "~" << len << " to " << dstoff << " = " << r << dendl;
return r;
}
uint64_t srcoff, uint64_t len, uint64_t dstoff,
const SequencerPosition& spos)
{
- dout(15) << "clone_range " << oldcid << "/" << oldoid << " -> " << newcid << "/" << newoid << " " << srcoff << "~" << len << " to " << dstoff << dendl;
+ dout(15) << __FUNC__ << ": " << oldcid << "/" << oldoid << " -> " << newcid << "/" << newoid << " " << srcoff << "~" << len << " to " << dstoff << dendl;
if (_check_replay_guard(newcid, newoid, spos) < 0)
return 0;
out:
lfn_close(o);
out2:
- dout(10) << "clone_range " << oldcid << "/" << oldoid << " -> " << newcid << "/" << newoid << " "
+ dout(10) << __FUNC__ << ": " << oldcid << "/" << oldoid << " -> " << newcid << "/" << newoid << " "
<< srcoff << "~" << len << " to " << dstoff << " = " << r << dendl;
return r;
}
utime_t startwait = ceph_clock_now();
if (!force_sync) {
- dout(20) << "sync_entry waiting for max_interval " << max_interval << dendl;
+ dout(20) << __FUNC__ << ": waiting for max_interval " << max_interval << dendl;
sync_cond.WaitInterval(lock, max_interval);
} else {
- dout(20) << "sync_entry not waiting, force_sync set" << dendl;
+ dout(20) << __FUNC__ << ": not waiting, force_sync set" << dendl;
}
if (force_sync) {
- dout(20) << "sync_entry force_sync set" << dendl;
+ dout(20) << __FUNC__ << ": force_sync set" << dendl;
force_sync = false;
} else if (stop) {
- dout(20) << __func__ << " stop set" << dendl;
+ dout(20) << __FUNC__ << ": stop set" << dendl;
break;
} else {
// wait for at least the min interval
utime_t woke = ceph_clock_now();
woke -= startwait;
- dout(20) << "sync_entry woke after " << woke << dendl;
+ dout(20) << __FUNC__ << ": woke after " << woke << dendl;
if (woke < min_interval) {
utime_t t = min_interval;
t -= woke;
- dout(20) << "sync_entry waiting for another " << t
+ dout(20) << __FUNC__ << ": waiting for another " << t
<< " to reach min interval " << min_interval << dendl;
sync_cond.WaitInterval(lock, t);
}
logger->set(l_filestore_committing, 1);
- dout(15) << "sync_entry committing " << cp << dendl;
+ dout(15) << __FUNC__ << ": committing " << cp << dendl;
stringstream errstream;
if (cct->_conf->filestore_debug_omap_check && !object_map->check(errstream)) {
derr << errstream.str() << dendl;
utime_t done = ceph_clock_now();
utime_t lat = done - start;
utime_t dur = done - startwait;
- dout(10) << "sync_entry commit took " << lat << ", interval was " << dur << dendl;
+ dout(10) << __FUNC__ << ": commit took " << lat << ", interval was " << dur << dendl;
logger->inc(l_filestore_commitcycle);
logger->tinc(l_filestore_commitcycle_latency, lat);
}
}
- dout(15) << "sync_entry committed to op_seq " << cp << dendl;
+ dout(15) << __FUNC__ << ": committed to op_seq " << cp << dendl;
sync_entry_timeo_lock.Lock();
timer.cancel_event(sync_entry_timeo);
finish_contexts(cct, fin, 0);
fin.clear();
if (!sync_waiters.empty()) {
- dout(10) << "sync_entry more waiters, committing again" << dendl;
+ dout(10) << __FUNC__ << ": more waiters, committing again" << dendl;
goto again;
}
if (!stop && journal && journal->should_commit_now()) {
- dout(10) << "sync_entry journal says we should commit again (probably is/was full)" << dendl;
+ dout(10) << __FUNC__ << ": journal says we should commit again (probably is/was full)" << dendl;
goto again;
}
}
void FileStore::_start_sync()
{
if (!journal) { // don't do a big sync if the journal is on
- dout(10) << "start_sync" << dendl;
+ dout(10) << __FUNC__ << dendl;
sync_cond.Signal();
} else {
- dout(10) << "start_sync - NOOP (journal is on)" << dendl;
+ dout(10) << __FUNC__ << ": NOOP (journal is on)" << dendl;
}
}
void FileStore::do_force_sync()
{
- dout(10) << __func__ << dendl;
+ dout(10) << __FUNC__ << dendl;
Mutex::Locker l(lock);
force_sync = true;
sync_cond.Signal();
sync_waiters.push_back(onsafe);
sync_cond.Signal();
force_sync = true;
- dout(10) << "start_sync" << dendl;
+ dout(10) << __FUNC__ << dendl;
}
void FileStore::sync()
void FileStore::_flush_op_queue()
{
- dout(10) << "_flush_op_queue draining op tp" << dendl;
+ dout(10) << __FUNC__ << ": draining op tp" << dendl;
op_wq.drain();
- dout(10) << "_flush_op_queue waiting for apply finisher" << dendl;
+ dout(10) << __FUNC__ << ": waiting for apply finisher" << dendl;
for (vector<Finisher*>::iterator it = apply_finishers.begin(); it != apply_finishers.end(); ++it) {
(*it)->wait_for_empty();
}
*/
void FileStore::flush()
{
- dout(10) << "flush" << dendl;
+ dout(10) << __FUNC__ << dendl;
if (cct->_conf->filestore_blackhole) {
// wait forever
if (m_filestore_journal_writeahead) {
if (journal)
journal->flush();
- dout(10) << "flush draining ondisk finisher" << dendl;
+ dout(10) << __FUNC__ << ": draining ondisk finisher" << dendl;
for (vector<Finisher*>::iterator it = ondisk_finishers.begin(); it != ondisk_finishers.end(); ++it) {
(*it)->wait_for_empty();
}
}
_flush_op_queue();
- dout(10) << "flush complete" << dendl;
+ dout(10) << __FUNC__ << ": complete" << dendl;
}
/*
*/
void FileStore::sync_and_flush()
{
- dout(10) << "sync_and_flush" << dendl;
+ dout(10) << __FUNC__ << dendl;
if (m_filestore_journal_writeahead) {
if (journal)
_flush_op_queue();
sync();
}
- dout(10) << "sync_and_flush done" << dendl;
+ dout(10) << __FUNC__ << ": done" << dendl;
}
int FileStore::flush_journal()
{
- dout(10) << __func__ << dendl;
+ dout(10) << __FUNC__ << dendl;
sync_and_flush();
sync();
return 0;
int FileStore::snapshot(const string& name)
{
- dout(10) << "snapshot " << name << dendl;
+ dout(10) << __FUNC__ << ": " << name << dendl;
sync_and_flush();
if (!backend->can_checkpoint()) {
- dout(0) << "snapshot " << name << " failed, not supported" << dendl;
+ dout(0) << __FUNC__ << ": " << name << " failed, not supported" << dendl;
return -EOPNOTSUPP;
}
int r = backend->create_checkpoint(s, NULL);
if (r) {
- derr << "snapshot " << name << " failed: " << cpp_strerror(r) << dendl;
+ derr << __FUNC__ << ": " << name << " failed: " << cpp_strerror(r) << dendl;
}
return r;
char *attrname = name;
if (parse_attrname(&name)) {
if (*name) {
- dout(20) << "fgetattrs " << fd << " getting '" << name << "'" << dendl;
+ dout(20) << __FUNC__ << ": " << fd << " getting '" << name << "'" << dendl;
int r = _fgetattr(fd, attrname, aset[name]);
if (r < 0) {
delete[] names2;
// ??? Why do we skip setting all the other attrs if one fails?
int r = chain_fsetxattr(fd, n, val, p->second.length());
if (r < 0) {
- derr << "FileStore::_setattrs: chain_setxattr returned " << r << dendl;
+ derr << __FUNC__ << ": chain_setxattr returned " << r << dendl;
return r;
}
}
// debug EIO injection
void FileStore::inject_data_error(const ghobject_t &oid) {
Mutex::Locker l(read_error_lock);
- dout(10) << __func__ << ": init error on " << oid << dendl;
+ dout(10) << __FUNC__ << ": init error on " << oid << dendl;
data_error_set.insert(oid);
}
void FileStore::inject_mdata_error(const ghobject_t &oid) {
Mutex::Locker l(read_error_lock);
- dout(10) << __func__ << ": init error on " << oid << dendl;
+ dout(10) << __FUNC__ << ": init error on " << oid << dendl;
mdata_error_set.insert(oid);
}
void FileStore::debug_obj_on_delete(const ghobject_t &oid) {
Mutex::Locker l(read_error_lock);
- dout(10) << __func__ << ": clear error on " << oid << dendl;
+ dout(10) << __FUNC__ << ": clear error on " << oid << dendl;
data_error_set.erase(oid);
mdata_error_set.erase(oid);
}
bool FileStore::debug_data_eio(const ghobject_t &oid) {
Mutex::Locker l(read_error_lock);
if (data_error_set.count(oid)) {
- dout(10) << __func__ << ": inject error on " << oid << dendl;
+ dout(10) << __FUNC__ << ": inject error on " << oid << dendl;
return true;
} else {
return false;
bool FileStore::debug_mdata_eio(const ghobject_t &oid) {
Mutex::Locker l(read_error_lock);
if (mdata_error_set.count(oid)) {
- dout(10) << __func__ << ": inject error on " << oid << dendl;
+ dout(10) << __FUNC__ << ": inject error on " << oid << dendl;
return true;
} else {
return false;
{
tracepoint(objectstore, getattr_enter, _cid.c_str());
const coll_t& cid = !_need_temp_object_collection(_cid, oid) ? _cid : _cid.get_temp();
- dout(15) << "getattr " << cid << "/" << oid << " '" << name << "'" << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " '" << name << "'" << dendl;
FDRef fd;
int r = lfn_open(cid, oid, false, &fd);
if (r < 0) {
Index index;
r = get_index(cid, &index);
if (r < 0) {
- dout(10) << __func__ << " could not get index r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get index r = " << r << dendl;
goto out;
}
r = object_map->get_xattrs(oid, to_get, &got);
if (r < 0 && r != -ENOENT) {
- dout(10) << __func__ << " get_xattrs err r =" << r << dendl;
+ dout(10) << __FUNC__ << ": get_xattrs err r = " << r << dendl;
goto out;
}
if (got.empty()) {
- dout(10) << __func__ << " got.size() is 0" << dendl;
+ dout(10) << __FUNC__ << ": got.size() is 0" << dendl;
return -ENODATA;
}
bp = bufferptr(got.begin()->second.c_str(),
r = bp.length();
}
out:
- dout(10) << "getattr " << cid << "/" << oid << " '" << name << "' = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " '" << name << "' = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
if (cct->_conf->filestore_debug_inject_read_err &&
debug_mdata_eio(oid)) {
set<string> omap_attrs;
map<string, bufferlist> omap_aset;
Index index;
- dout(15) << "getattrs " << cid << "/" << oid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << dendl;
FDRef fd;
bool spill_out = true;
char buf[2];
}
if (!spill_out) {
- dout(10) << __func__ << " no xattr exists in object_map r = " << r << dendl;
+ dout(10) << __FUNC__ << ": no xattr exists in object_map r = " << r << dendl;
goto out;
}
r = get_index(cid, &index);
if (r < 0) {
- dout(10) << __func__ << " could not get index r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get index r = " << r << dendl;
goto out;
}
{
r = object_map->get_all_xattrs(oid, &omap_attrs);
if (r < 0 && r != -ENOENT) {
- dout(10) << __func__ << " could not get omap_attrs r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get omap_attrs r = " << r << dendl;
goto out;
}
r = object_map->get_xattrs(oid, omap_attrs, &omap_aset);
if (r < 0 && r != -ENOENT) {
- dout(10) << __func__ << " could not get omap_attrs r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get omap_attrs r = " << r << dendl;
goto out;
}
if (r == -ENOENT)
bufferptr(i->second.c_str(), i->second.length())));
}
out:
- dout(10) << "getattrs " << cid << "/" << oid << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
if (cct->_conf->filestore_debug_inject_read_err &&
r = _fgetattrs(**fd, inline_set);
incomplete_inline = (r == -E2BIG);
assert(!m_filestore_fail_eio || r != -EIO);
- dout(15) << "setattrs " << cid << "/" << oid
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid
<< (incomplete_inline ? " (incomplete_inline, forcing omap)" : "")
<< dendl;
if (spill_out && !omap_remove.empty()) {
r = object_map->remove_xattrs(oid, omap_remove, &spos);
if (r < 0 && r != -ENOENT) {
- dout(10) << __func__ << " could not remove_xattrs r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not remove_xattrs r = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
} else {
if (!omap_set.empty()) {
r = object_map->set_xattrs(oid, omap_set, &spos);
if (r < 0) {
- dout(10) << __func__ << " could not set_xattrs r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not set_xattrs r = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
}
out_close:
lfn_close(fd);
out:
- dout(10) << "setattrs " << cid << "/" << oid << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " = " << r << dendl;
return r;
}
int FileStore::_rmattr(const coll_t& cid, const ghobject_t& oid, const char *name,
const SequencerPosition &spos)
{
- dout(15) << "rmattr " << cid << "/" << oid << " '" << name << "'" << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " '" << name << "'" << dendl;
FDRef fd;
bool spill_out = true;
Index index;
r = get_index(cid, &index);
if (r < 0) {
- dout(10) << __func__ << " could not get index r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get index r = " << r << dendl;
goto out_close;
}
set<string> to_remove;
to_remove.insert(string(name));
r = object_map->remove_xattrs(oid, to_remove, &spos);
if (r < 0 && r != -ENOENT) {
- dout(10) << __func__ << " could not remove_xattrs index r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not remove_xattrs index r = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
}
out_close:
lfn_close(fd);
out:
- dout(10) << "rmattr " << cid << "/" << oid << " '" << name << "' = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " '" << name << "' = " << r << dendl;
return r;
}
int FileStore::_rmattrs(const coll_t& cid, const ghobject_t& oid,
const SequencerPosition &spos)
{
- dout(15) << "rmattrs " << cid << "/" << oid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << dendl;
map<string,bufferptr> aset;
FDRef fd;
get_attrname(p->first.c_str(), n, CHAIN_XATTR_MAX_NAME_LEN);
r = chain_fremovexattr(**fd, n);
if (r < 0) {
- dout(10) << __func__ << " could not remove xattr r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not remove xattr r = " << r << dendl;
goto out_close;
}
}
}
if (!spill_out) {
- dout(10) << __func__ << " no xattr exists in object_map r = " << r << dendl;
+ dout(10) << __FUNC__ << ": no xattr exists in object_map r = " << r << dendl;
goto out_close;
}
r = get_index(cid, &index);
if (r < 0) {
- dout(10) << __func__ << " could not get index r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get index r = " << r << dendl;
goto out_close;
}
{
r = object_map->get_all_xattrs(oid, &omap_attrs);
if (r < 0 && r != -ENOENT) {
- dout(10) << __func__ << " could not get omap_attrs r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not get omap_attrs r = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
}
r = object_map->remove_xattrs(oid, omap_attrs, &spos);
if (r < 0 && r != -ENOENT) {
- dout(10) << __func__ << " could not remove omap_attrs r = " << r << dendl;
+ dout(10) << __FUNC__ << ": could not remove omap_attrs r = " << r << dendl;
goto out_close;
}
if (r == -ENOENT)
out_close:
lfn_close(fd);
out:
- dout(10) << "rmattrs " << cid << "/" << oid << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " = " << r << dendl;
return r;
}
int FileStore::list_collections(vector<coll_t>& ls, bool include_temp)
{
tracepoint(objectstore, list_collections_enter);
- dout(10) << "list_collections" << dendl;
+ dout(10) << __FUNC__ << dendl;
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/current", basedir.c_str());
tracepoint(objectstore, collection_stat_enter, c.c_str());
char fn[PATH_MAX];
get_cdir(c, fn, sizeof(fn));
- dout(15) << "collection_stat " << fn << dendl;
+ dout(15) << __FUNC__ << ": " << fn << dendl;
int r = ::stat(fn, st);
if (r < 0)
r = -errno;
- dout(10) << "collection_stat " << fn << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << fn << " = " << r << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
tracepoint(objectstore, collection_stat_exit, r);
return r;
int FileStore::collection_empty(const coll_t& c, bool *empty)
{
tracepoint(objectstore, collection_empty_enter, c.c_str());
- dout(15) << "collection_empty " << c << dendl;
+ dout(15) << __FUNC__ << ": " << c << dendl;
Index index;
int r = get_index(c, &index);
if (r < 0) {
- derr << __func__ << " get_index returned: " << cpp_strerror(r)
+ derr << __FUNC__ << ": get_index returned: " << cpp_strerror(r)
<< dendl;
return r;
}
r = index->collection_list_partial(ghobject_t(), ghobject_t::get_max(),
1, &ls, NULL);
if (r < 0) {
- derr << __func__ << " collection_list_partial returned: "
+ derr << __FUNC__ << ": collection_list_partial returned: "
<< cpp_strerror(r) << dendl;
assert(!m_filestore_fail_eio || r != -EIO);
return r;
{
char fn[PATH_MAX];
get_cdir(c, fn, sizeof(fn));
- dout(10) << "collection_set_bits " << fn << " " << bits << dendl;
+ dout(10) << __FUNC__ << ": " << fn << " " << bits << dendl;
char n[PATH_MAX];
int r;
int32_t v = bits;
r = chain_fsetxattr(fd, n, (char*)&v, sizeof(v));
VOID_TEMP_FAILURE_RETRY(::close(fd));
out:
- dout(10) << "collection_setattr " << fn << " " << bits << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << fn << " " << bits << " = " << r << dendl;
return r;
}
{
char fn[PATH_MAX];
get_cdir(c, fn, sizeof(fn));
- dout(15) << "collection_bits " << fn << dendl;
+ dout(15) << __FUNC__ << ": " << fn << dendl;
int r;
char n[PATH_MAX];
int32_t bits;
goto out;
}
out:
- dout(10) << "collection_bits " << fn << " = " << bits << dendl;
+ dout(10) << __FUNC__ << ": " << fn << " = " << bits << dendl;
return bits;
}
pool = 0;
shard = shard_id_t::NO_SHARD;
}
- dout(20) << __func__ << " pool is " << pool << " shard is " << shard
+ dout(20) << __FUNC__ << ": pool is " << pool << " shard is " << shard
<< " pgid " << pgid << dendl;
}
ghobject_t sep;
sep.set_shard(shard);
if (!c.is_temp() && !c.is_meta()) {
if (start < sep) {
- dout(10) << __func__ << " first checking temp pool" << dendl;
+ dout(10) << __FUNC__ << ": first checking temp pool" << dendl;
coll_t temp = c.get_temp();
int r = collection_list(temp, start, end, max, ls, next);
if (r < 0)
if (*next != ghobject_t::get_max())
return r;
start = sep;
- dout(10) << __func__ << " fall through to non-temp collection, start "
+ dout(10) << __FUNC__ << ": fall through to non-temp collection, start "
<< start << dendl;
} else {
- dout(10) << __func__ << " start " << start << " >= sep " << sep << dendl;
+ dout(10) << __FUNC__ << ": start " << start << " >= sep " << sep << dendl;
}
}
{
tracepoint(objectstore, omap_get_enter, _c.c_str());
const coll_t& c = !_need_temp_object_collection(_c, hoid) ? _c : _c.get_temp();
- dout(15) << __func__ << " " << c << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << hoid << dendl;
Index index;
int r = get_index(c, &index);
if (r < 0)
{
tracepoint(objectstore, omap_get_header_enter, _c.c_str());
const coll_t& c = !_need_temp_object_collection(_c, hoid) ? _c : _c.get_temp();
- dout(15) << __func__ << " " << c << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << hoid << dendl;
Index index;
int r = get_index(c, &index);
if (r < 0)
{
tracepoint(objectstore, omap_get_keys_enter, _c.c_str());
const coll_t& c = !_need_temp_object_collection(_c, hoid) ? _c : _c.get_temp();
- dout(15) << __func__ << " " << c << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << hoid << dendl;
Index index;
int r = get_index(c, &index);
if (r < 0)
{
tracepoint(objectstore, omap_get_values_enter, _c.c_str());
const coll_t& c = !_need_temp_object_collection(_c, hoid) ? _c : _c.get_temp();
- dout(15) << __func__ << " " << c << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << hoid << dendl;
Index index;
const char *where = "()";
int r = get_index(c, &index);
r = 0;
out:
tracepoint(objectstore, omap_get_values_exit, r);
- dout(15) << __func__ << " " << c << "/" << hoid << " = " << r
+ dout(15) << __FUNC__ << ": " << c << "/" << hoid << " = " << r
<< where << dendl;
return r;
}
{
tracepoint(objectstore, omap_check_keys_enter, _c.c_str());
const coll_t& c = !_need_temp_object_collection(_c, hoid) ? _c : _c.get_temp();
- dout(15) << __func__ << " " << c << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << hoid << dendl;
Index index;
int r = get_index(c, &index);
{
tracepoint(objectstore, get_omap_iterator, _c.c_str());
const coll_t& c = !_need_temp_object_collection(_c, hoid) ? _c : _c.get_temp();
- dout(15) << __func__ << " " << c << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << hoid << dendl;
Index index;
int r = get_index(c, &index);
if (r < 0) {
- dout(10) << __func__ << " " << c << "/" << hoid << " = 0 "
+ dout(10) << __FUNC__ << ": " << c << "/" << hoid << " = 0 "
<< "(get_index failed with " << cpp_strerror(r) << ")" << dendl;
return ObjectMap::ObjectMapIterator();
}
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0) {
- dout(10) << __func__ << " " << c << "/" << hoid << " = 0 "
+ dout(10) << __FUNC__ << ": " << c << "/" << hoid << " = 0 "
<< "(lfn_find failed with " << cpp_strerror(r) << ")" << dendl;
return ObjectMap::ObjectMapIterator();
}
uint64_t expected_num_objs,
const SequencerPosition &spos)
{
- dout(15) << __func__ << " collection: " << c << " pg number: "
+ dout(15) << __FUNC__ << ": collection: " << c << " pg number: "
<< pg_num << " expected number of objects: " << expected_num_objs << dendl;
bool empty;
{
char fn[PATH_MAX];
get_cdir(c, fn, sizeof(fn));
- dout(15) << "create_collection " << fn << dendl;
+ dout(15) << __FUNC__ << ": " << fn << dendl;
int r = ::mkdir(fn, 0755);
if (r < 0)
r = -errno;
if (r == -EEXIST && replaying)
r = 0;
- dout(10) << "create_collection " << fn << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << fn << " = " << r << dendl;
if (r < 0)
return r;
int r = 0;
char fn[PATH_MAX];
get_cdir(c, fn, sizeof(fn));
- dout(15) << "_destroy_collection " << fn << dendl;
+ dout(15) << __FUNC__ << ": " << fn << dendl;
{
Index from;
r = get_index(c, &from);
}
out_final:
- dout(10) << "_destroy_collection " << fn << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << fn << " = " << r << dendl;
return r;
}
int FileStore::_collection_add(const coll_t& c, const coll_t& oldcid, const ghobject_t& o,
const SequencerPosition& spos)
{
- dout(15) << "collection_add " << c << "/" << o << " from " << oldcid << "/" << o << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << o << " from " << oldcid << "/" << o << dendl;
int dstcmp = _check_replay_guard(c, o, spos);
if (dstcmp < 0)
// the source collection/object does not exist. If we are replaying, we
// should be safe, so just return 0 and move on.
assert(replaying);
- dout(10) << "collection_add " << c << "/" << o << " from "
+ dout(10) << __FUNC__ << ": " << c << "/" << o << " from "
<< oldcid << "/" << o << " (dne, continue replay) " << dendl;
return 0;
}
}
lfn_close(fd);
- dout(10) << "collection_add " << c << "/" << o << " from " << oldcid << "/" << o << " = " << r << dendl;
+ dout(10) << __FUNC__ << ": " << c << "/" << o << " from " << oldcid << "/" << o << " = " << r << dendl;
return r;
}
const SequencerPosition& spos,
bool allow_enoent)
{
- dout(15) << __func__ << " " << c << "/" << o << " from " << oldcid << "/" << oldoid << dendl;
+ dout(15) << __FUNC__ << ": " << c << "/" << o << " from " << oldcid << "/" << oldoid << dendl;
int r = 0;
int dstcmp, srccmp;
// the source collection/object does not exist. If we are replaying, we
// should be safe, so just return 0 and move on.
if (replaying) {
- dout(10) << __func__ << " " << c << "/" << o << " from "
+ dout(10) << __FUNC__ << ": " << c << "/" << o << " from "
<< oldcid << "/" << oldoid << " (dne, continue replay) " << dendl;
} else if (allow_enoent) {
- dout(10) << __func__ << " " << c << "/" << o << " from "
+ dout(10) << __FUNC__ << ": " << c << "/" << o << " from "
<< oldcid << "/" << oldoid << " (dne, ignoring enoent)"
<< dendl;
} else {
}
}
- dout(10) << __func__ << " " << c << "/" << o << " from " << oldcid << "/" << oldoid
+ dout(10) << __FUNC__ << ": " << c << "/" << o << " from " << oldcid << "/" << oldoid
<< " = " << r << dendl;
return r;
r = lfn_unlink(oldcid, oldoid, spos, true);
}
- dout(10) << __func__ << " " << c << "/" << o << " from " << oldcid << "/" << oldoid
+ dout(10) << __FUNC__ << ": " << c << "/" << o << " from " << oldcid << "/" << oldoid
<< " = " << r << dendl;
return r;
}
void FileStore::_inject_failure()
{
- if (m_filestore_kill_at.read()) {
- int final = m_filestore_kill_at.dec();
- dout(5) << "_inject_failure " << (final+1) << " -> " << final << dendl;
+ if (m_filestore_kill_at) {
+ int final = --m_filestore_kill_at;
+ dout(5) << __FUNC__ << ": " << (final+1) << " -> " << final << dendl;
if (final == 0) {
- derr << "_inject_failure KILLING" << dendl;
+ derr << __FUNC__ << ": KILLING" << dendl;
cct->_log->flush();
_exit(1);
}
int FileStore::_omap_clear(const coll_t& cid, const ghobject_t &hoid,
const SequencerPosition &spos) {
- dout(15) << __func__ << " " << cid << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << hoid << dendl;
Index index;
int r = get_index(cid, &index);
if (r < 0)
int FileStore::_omap_setkeys(const coll_t& cid, const ghobject_t &hoid,
const map<string, bufferlist> &aset,
const SequencerPosition &spos) {
- dout(15) << __func__ << " " << cid << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << hoid << dendl;
Index index;
int r;
//treat pgmeta as a logical object, skip to check exist
r = get_index(cid, &index);
if (r < 0) {
- dout(20) << __func__ << " get_index got " << cpp_strerror(r) << dendl;
+ dout(20) << __FUNC__ << ": get_index got " << cpp_strerror(r) << dendl;
return r;
}
{
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0) {
- dout(20) << __func__ << " lfn_find got " << cpp_strerror(r) << dendl;
+ dout(20) << __FUNC__ << ": lfn_find got " << cpp_strerror(r) << dendl;
return r;
}
}
skip:
if (g_conf->subsys.should_gather(ceph_subsys_filestore, 20)) {
for (auto& p : aset) {
- dout(20) << __func__ << " set " << p.first << dendl;
+ dout(20) << __FUNC__ << ": set " << p.first << dendl;
}
}
r = object_map->set_keys(hoid, aset, &spos);
- dout(20) << __func__ << " " << cid << "/" << hoid << " = " << r << dendl;
+ dout(20) << __FUNC__ << ": " << cid << "/" << hoid << " = " << r << dendl;
return r;
}
int FileStore::_omap_rmkeys(const coll_t& cid, const ghobject_t &hoid,
const set<string> &keys,
const SequencerPosition &spos) {
- dout(15) << __func__ << " " << cid << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << hoid << dendl;
Index index;
int r;
//treat pgmeta as a logical object, skip to check exist
int FileStore::_omap_rmkeyrange(const coll_t& cid, const ghobject_t &hoid,
const string& first, const string& last,
const SequencerPosition &spos) {
- dout(15) << __func__ << " " << cid << "/" << hoid << " [" << first << "," << last << "]" << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << hoid << " [" << first << "," << last << "]" << dendl;
set<string> keys;
{
ObjectMap::ObjectMapIterator iter = get_omap_iterator(cid, hoid);
const bufferlist &bl,
const SequencerPosition &spos)
{
- dout(15) << __func__ << " " << cid << "/" << hoid << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << hoid << dendl;
Index index;
int r = get_index(cid, &index);
if (r < 0)
{
int r;
{
- dout(15) << __func__ << " " << cid << " bits: " << bits << dendl;
+ dout(15) << __FUNC__ << ": " << cid << " bits: " << bits << dendl;
if (!collection_exists(cid)) {
- dout(2) << __func__ << ": " << cid << " DNE" << dendl;
+ dout(2) << __FUNC__ << ": " << cid << " DNE" << dendl;
assert(replaying);
return 0;
}
if (!collection_exists(dest)) {
- dout(2) << __func__ << ": " << dest << " DNE" << dendl;
+ dout(2) << __FUNC__ << ": " << dest << " DNE" << dendl;
assert(replaying);
return 0;
}
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
- dout(20) << __func__ << ": " << *i << " still in source "
+ dout(20) << __FUNC__ << ": " << *i << " still in source "
<< cid << dendl;
assert(!i->match(bits, rem));
}
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
- dout(20) << __func__ << ": " << *i << " now in dest "
+ dout(20) << __FUNC__ << ": " << *i << " now in dest "
<< *i << dendl;
assert(i->match(bits, rem));
}
uint64_t expected_object_size,
uint64_t expected_write_size)
{
- dout(15) << "set_alloc_hint " << cid << "/" << oid << " object_size " << expected_object_size << " write_size " << expected_write_size << dendl;
+ dout(15) << __FUNC__ << ": " << cid << "/" << oid << " object_size " << expected_object_size << " write_size " << expected_write_size << dendl;
FDRef fd;
int ret = 0;
uint64_t hint = MIN(expected_write_size, m_filestore_max_alloc_hint_size);
ret = backend->set_alloc_hint(**fd, hint);
- dout(20) << "set_alloc_hint hint " << hint << " ret " << ret << dendl;
+ dout(20) << __FUNC__ << ": hint " << hint << " ret " << ret << dendl;
}
lfn_close(fd);
out:
- dout(10) << "set_alloc_hint " << cid << "/" << oid << " object_size " << expected_object_size << " write_size " << expected_write_size << " = " << ret << dendl;
+ dout(10) << __FUNC__ << ": " << cid << "/" << oid << " object_size " << expected_object_size << " write_size " << expected_write_size << " = " << ret << dendl;
assert(!m_filestore_fail_eio || ret != -EIO);
return ret;
}
Mutex::Locker l(lock);
m_filestore_min_sync_interval = conf->filestore_min_sync_interval;
m_filestore_max_sync_interval = conf->filestore_max_sync_interval;
- m_filestore_kill_at.set(conf->filestore_kill_at);
+ m_filestore_kill_at = conf->filestore_kill_at;
m_filestore_fail_eio = conf->filestore_fail_eio;
m_filestore_fadvise = conf->filestore_fadvise;
m_filestore_sloppy_crc = conf->filestore_sloppy_crc;
void FileStore::dump_start(const std::string& file)
{
- dout(10) << "dump_start " << file << dendl;
+ dout(10) << __FUNC__ << ": " << file << dendl;
if (m_filestore_do_dump) {
dump_stop();
}
void FileStore::dump_stop()
{
- dout(10) << "dump_stop" << dendl;
+ dout(10) << __FUNC__ << dendl;
m_filestore_do_dump = false;
if (m_filestore_dump.is_open()) {
m_filestore_dump_fmt.close_section();
int FileStore::apply_layout_settings(const coll_t &cid)
{
- dout(20) << __func__ << " " << cid << dendl;
+ dout(20) << __FUNC__ << ": " << cid << dendl;
Index index;
int r = get_index(cid, &index);
if (r < 0) {