#include <sys/types.h>
#include <time.h>
#include <utime.h>
+#include <string.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include "common/config.h"
#include "common/version.h"
-// ceph stuff
-#include "messages/MClientSession.h"
+#include "mon/MonClient.h"
+
+#include "messages/MClientCaps.h"
+#include "messages/MClientLease.h"
+#include "messages/MClientQuota.h"
+#include "messages/MClientReclaim.h"
+#include "messages/MClientReclaimReply.h"
#include "messages/MClientReconnect.h"
+#include "messages/MClientReply.h"
#include "messages/MClientRequest.h"
#include "messages/MClientRequestForward.h"
-#include "messages/MClientReply.h"
-#include "messages/MClientCaps.h"
-#include "messages/MClientLease.h"
+#include "messages/MClientSession.h"
#include "messages/MClientSnap.h"
#include "messages/MCommandReply.h"
-#include "messages/MOSDMap.h"
-#include "messages/MClientQuota.h"
-#include "messages/MClientCapRelease.h"
-#include "messages/MMDSMap.h"
#include "messages/MFSMap.h"
#include "messages/MFSMapUser.h"
-
-#include "mon/MonClient.h"
+#include "messages/MMDSMap.h"
+#include "messages/MOSDMap.h"
#include "mds/flock.h"
+#include "mds/cephfs_features.h"
#include "osd/OSDMap.h"
#include "osdc/Filer.h"
#include "common/Cond.h"
-#include "common/Mutex.h"
#include "common/perf_counters.h"
#include "common/admin_socket.h"
#include "common/errno.h"
#include "ObjecterWriteback.h"
#include "posix_acl.h"
-#include "include/assert.h"
+#include "include/ceph_assert.h"
#include "include/stat.h"
-#include "include/cephfs/ceph_statx.h"
+#include "include/cephfs/ceph_ll_client.h"
#if HAVE_GETGROUPLIST
#include <grp.h>
#define DEBUG_GETATTR_CAPS (CEPH_CAP_XATTR_SHARED)
+using namespace TOPNSPC::common;
+
void client_flush_set_callback(void *p, ObjectCacher::ObjectSet *oset)
{
Client *client = static_cast<Client*>(p);
client->flush_set_callback(oset);
}
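+// Inode numbers in the MDS-private range (per-MDS dirs, stray dirs,
+// etc.) must never be handed back to callers; flag any attempt to
+// reach one.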
+bool Client::is_reserved_vino(vinodeno_t &vino) {
+ if (MDS_IS_PRIVATE_INO(vino.ino)) {
+ ldout(cct, -1) << __func__ << " attempt to access reserved inode number " << vino << dendl;
+ return true;
+ }
+ return false;
+}
+
// -------------
{
}
-bool Client::CommandHook::call(std::string command, cmdmap_t& cmdmap,
- std::string format, bufferlist& out)
+int Client::CommandHook::call(
+ std::string_view command,
+ const cmdmap_t& cmdmap,
+ Formatter *f,
+ std::ostream& errss,
+ bufferlist& out)
{
- Formatter *f = Formatter::create(format);
f->open_object_section("result");
- m_client->client_lock.Lock();
- if (command == "mds_requests")
- m_client->dump_mds_requests(f);
- else if (command == "mds_sessions")
- m_client->dump_mds_sessions(f);
- else if (command == "dump_cache")
- m_client->dump_cache(f);
- else if (command == "kick_stale_sessions")
- m_client->_kick_stale_sessions();
- else if (command == "status")
- m_client->dump_status(f);
- else
- assert(0 == "bad command registered");
- m_client->client_lock.Unlock();
+ {
+ std::lock_guard l{m_client->client_lock};
+ if (command == "mds_requests")
+ m_client->dump_mds_requests(f);
+ else if (command == "mds_sessions") {
+ bool cap_dump = false;
+ cmd_getval(cmdmap, "cap_dump", cap_dump);
+ m_client->dump_mds_sessions(f, cap_dump);
+ } else if (command == "dump_cache")
+ m_client->dump_cache(f);
+ else if (command == "kick_stale_sessions")
+ m_client->_kick_stale_sessions();
+ else if (command == "status")
+ m_client->dump_status(f);
+ else
+ ceph_abort_msg("bad command registered");
+ }
f->close_section();
- f->flush(out);
- delete f;
- return true;
+ return 0;
}
free_faked_inos.clear();
free_faked_inos.insert(start, (uint32_t)-1 - start + 1);
last_used_faked_ino = 0;
+ last_used_faked_root = 0;
_use_faked_inos = sizeof(ino_t) < 8 || cct->_conf->client_use_faked_inos;
}
void Client::_assign_faked_ino(Inode *in)
{
+  if (0 == last_used_faked_ino)
+    last_used_faked_ino = last_used_faked_ino + 2048; // ids from start (1024) up to 2048 are reserved for _assign_faked_root
interval_set<ino_t>::const_iterator it = free_faked_inos.lower_bound(last_used_faked_ino + 1);
if (it == free_faked_inos.end() && last_used_faked_ino > 0) {
- last_used_faked_ino = 0;
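+    // wrapped: restart the scan just above the range reserved for faked roots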
+ last_used_faked_ino = 2048;
it = free_faked_inos.lower_bound(last_used_faked_ino + 1);
}
- assert(it != free_faked_inos.end());
+ ceph_assert(it != free_faked_inos.end());
if (last_used_faked_ino < it.get_start()) {
- assert(it.get_len() > 0);
+ ceph_assert(it.get_len() > 0);
last_used_faked_ino = it.get_start();
} else {
++last_used_faked_ino;
- assert(it.get_start() + it.get_len() > last_used_faked_ino);
+ ceph_assert(it.get_start() + it.get_len() > last_used_faked_ino);
}
in->faked_ino = last_used_faked_ino;
free_faked_inos.erase(in->faked_ino);
faked_ino_map[in->faked_ino] = in->vino();
}
+/*
+ * In faked mode, if multiple subdirectories are exported, their root
+ * inodes would otherwise all be assigned the same faked inode number.
+ * We therefore distinguish the mount points by reserving the faked ids
+ * in the range 1024~2048 and combining them with the last 10 bits
+ * (0x3ff) of the root inode numbers.
+ */
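+/*
+ * Illustrative example (hypothetical inode numbers): on the first call
+ * the free range starts at 1024, so a root inode whose low 10 bits are
+ * 0x2a gets faked id 1024 + 0x2a = 1066; a second root with different
+ * low bits lands on a distinct id, keeping the mount points apart.
+ */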
+void Client::_assign_faked_root(Inode *in)
+{
+ interval_set<ino_t>::const_iterator it = free_faked_inos.lower_bound(last_used_faked_root + 1);
+ if (it == free_faked_inos.end() && last_used_faked_root > 0) {
+ last_used_faked_root = 0;
+ it = free_faked_inos.lower_bound(last_used_faked_root + 1);
+ }
+  ceph_assert(it != free_faked_inos.end());
+  vinodeno_t inode_info = in->vino();
+  uint64_t inode_num = (uint64_t)inode_info.ino;
+  ldout(cct, 10) << "inode_num " << inode_num << " inode_num & 0x3ff=" << (inode_num & 0x3ff) << dendl;
+  last_used_faked_root = it.get_start() + (inode_num & 0x3ff); // 0x3ff mask and get_start will not exceed 2048
+  ceph_assert(it.get_start() + it.get_len() > last_used_faked_root);
+
+ in->faked_ino = last_used_faked_root;
+ free_faked_inos.erase(in->faked_ino);
+ faked_ino_map[in->faked_ino] = in->vino();
+}
+
void Client::_release_faked_ino(Inode *in)
{
free_faked_inos.insert(in->faked_ino);
vino = faked_ino_map[ino];
else
vino = vinodeno_t(0, CEPH_NOSNAP);
- ldout(cct, 10) << "map_faked_ino " << ino << " -> " << vino << dendl;
+ ldout(cct, 10) << __func__ << " " << ino << " -> " << vino << dendl;
return vino;
}
vinodeno_t Client::map_faked_ino(ino_t ino)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
return _map_faked_ino(ino);
}
Client::Client(Messenger *m, MonClient *mc, Objecter *objecter_)
: Dispatcher(m->cct),
- m_command_hook(this),
timer(m->cct, client_lock),
- callback_handle(NULL),
- switch_interrupt_cb(NULL),
- remount_cb(NULL),
- ino_invalidate_cb(NULL),
- dentry_invalidate_cb(NULL),
- umask_cb(NULL),
- can_invalidate_dentries(false),
+ messenger(m),
+ monclient(mc),
+ objecter(objecter_),
+ whoami(mc->get_global_id()),
async_ino_invalidator(m->cct),
async_dentry_invalidator(m->cct),
interrupt_finisher(m->cct),
remount_finisher(m->cct),
+ async_ino_releasor(m->cct),
objecter_finisher(m->cct),
- tick_event(NULL),
- messenger(m), monclient(mc),
- objecter(objecter_),
- whoami(mc->get_global_id()), cap_epoch_barrier(0),
- last_tid(0), oldest_tid(0), last_flush_tid(1),
- initialized(false),
- mounted(false), unmounting(false), blacklisted(false),
- local_osd(-ENXIO), local_osd_epoch(0),
- unsafe_sync_write(0),
- client_lock("Client::client_lock"),
- deleg_timeout(0)
+ m_command_hook(this),
+ fscid(0)
{
_reset_faked_inos();
- //
- root = 0;
-
- num_flushing_caps = 0;
-
- _dir_vxattrs_name_size = _vxattrs_calcu_name_size(_dir_vxattrs);
- _file_vxattrs_name_size = _vxattrs_calcu_name_size(_file_vxattrs);
user_id = cct->_conf->client_mount_uid;
group_id = cct->_conf->client_mount_gid;
+ fuse_default_permissions = cct->_conf.get_val<bool>(
+ "fuse_default_permissions");
- acl_type = NO_ACL;
if (cct->_conf->client_acl_type == "posix_acl")
acl_type = POSIX_ACL;
cct->_conf->client_oc_target_dirty,
cct->_conf->client_oc_max_dirty_age,
true));
- objecter_finisher.start();
- filer.reset(new Filer(objecter, &objecter_finisher));
- objecter->enable_blacklist_events();
}
Client::~Client()
{
- assert(!client_lock.is_locked());
+ ceph_assert(ceph_mutex_is_not_locked(client_lock));
// It is necessary to hold client_lock, because any inode destruction
// may call into ObjectCacher, which asserts that it's lock (which is
// client_lock) is held.
- client_lock.Lock();
+ std::lock_guard l{client_lock};
tear_down_cache();
- client_lock.Unlock();
}
void Client::tear_down_cache()
it != fd_map.end();
++it) {
Fh *fh = it->second;
- ldout(cct, 1) << "tear_down_cache forcing close of fh " << it->first << " ino " << fh->inode->ino << dendl;
+ ldout(cct, 1) << __func__ << " forcing close of fh " << it->first << " ino " << fh->inode->ino << dendl;
_release_fh(fh);
}
fd_map.clear();
while (!opened_dirs.empty()) {
dir_result_t *dirp = *opened_dirs.begin();
- ldout(cct, 1) << "tear_down_cache forcing close of dir " << dirp << " ino " << dirp->inode->ino << dendl;
+ ldout(cct, 1) << __func__ << " forcing close of dir " << dirp << " ino " << dirp->inode->ino << dendl;
_closedir(dirp);
}
// empty lru
trim_cache();
- assert(lru.lru_get_size() == 0);
+ ceph_assert(lru.lru_get_size() == 0);
// close root ino
- assert(inode_map.size() <= 1 + root_parents.size());
+ ceph_assert(inode_map.size() <= 1 + root_parents.size());
if (root && inode_map.size() == 1 + root_parents.size()) {
delete root;
root = 0;
_reset_faked_inos();
}
- assert(inode_map.empty());
+ ceph_assert(inode_map.empty());
}
inodeno_t Client::get_root_ino()
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
if (use_faked_inos())
return root->faked_ino;
else
Inode *Client::get_root()
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
root->ll_get();
return root;
}
{
set<Inode*> did;
- ldout(cct, 1) << "dump_cache" << dendl;
+ ldout(cct, 1) << __func__ << dendl;
if (f)
f->open_array_section("cache");
void Client::dump_status(Formatter *f)
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
ldout(cct, 1) << __func__ << dendl;
f->dump_int("dentry_count", lru.lru_get_size());
f->dump_int("dentry_pinned_count", lru.lru_get_num_pinned());
f->dump_int("id", get_nodeid().v);
- entity_inst_t inst(messenger->get_myname(), messenger->get_myaddr());
+ entity_inst_t inst(messenger->get_myname(), messenger->get_myaddr_legacy());
f->dump_object("inst", inst);
- f->dump_stream("inst_str") << inst;
- f->dump_stream("addr_str") << inst.addr;
+ f->dump_object("addr", inst.addr);
+ f->dump_stream("inst_str") << inst.name << " " << inst.addr.get_legacy_str();
+ f->dump_string("addr_str", inst.addr.get_legacy_str());
f->dump_int("inode_count", inode_map.size());
f->dump_int("mds_epoch", mdsmap->get_epoch());
f->dump_int("osd_epoch", osd_epoch);
f->dump_int("osd_epoch_barrier", cap_epoch_barrier);
+ f->dump_bool("blacklisted", blacklisted);
+ f->dump_string("fs_name", mdsmap->get_fs_name());
}
}
-int Client::init()
+void Client::_pre_init()
{
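+  // bring up the helper threads that message dispatch depends on,
+  // before init() registers this client with the messenger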
timer.init();
- objectcacher->start();
- client_lock.Lock();
- assert(!initialized);
+ objecter_finisher.start();
+ filer.reset(new Filer(objecter, &objecter_finisher));
+ objecter->enable_blacklist_events();
- messenger->add_dispatcher_tail(this);
- client_lock.Unlock();
+ objectcacher->start();
+}
+int Client::init()
+{
+ _pre_init();
+ {
+ std::lock_guard l{client_lock};
+ ceph_assert(!initialized);
+ messenger->add_dispatcher_tail(this);
+ }
_finish_init();
return 0;
}
void Client::_finish_init()
{
- client_lock.Lock();
- // logger
- PerfCountersBuilder plb(cct, "client", l_c_first, l_c_last);
- plb.add_time_avg(l_c_reply, "reply", "Latency of receiving a reply on metadata request");
- plb.add_time_avg(l_c_lat, "lat", "Latency of processing a metadata request");
- plb.add_time_avg(l_c_wrlat, "wrlat", "Latency of a file data write operation");
- logger.reset(plb.create_perf_counters());
- cct->get_perfcounters_collection()->add(logger.get());
-
- client_lock.Unlock();
+ {
+ std::lock_guard l{client_lock};
+ // logger
+ PerfCountersBuilder plb(cct, "client", l_c_first, l_c_last);
+ plb.add_time_avg(l_c_reply, "reply", "Latency of receiving a reply on metadata request");
+ plb.add_time_avg(l_c_lat, "lat", "Latency of processing a metadata request");
+ plb.add_time_avg(l_c_wrlat, "wrlat", "Latency of a file data write operation");
+ plb.add_time_avg(l_c_read, "rdlat", "Latency of a file data read operation");
+ plb.add_time_avg(l_c_fsync, "fsync", "Latency of a file sync operation");
+ logger.reset(plb.create_perf_counters());
+ cct->get_perfcounters_collection()->add(logger.get());
+ }
- cct->_conf->add_observer(this);
+ cct->_conf.add_observer(this);
AdminSocket* admin_socket = cct->get_admin_socket();
int ret = admin_socket->register_command("mds_requests",
- "mds_requests",
&m_command_hook,
"show in-progress mds requests");
if (ret < 0) {
lderr(cct) << "error registering admin socket command: "
<< cpp_strerror(-ret) << dendl;
}
- ret = admin_socket->register_command("mds_sessions",
- "mds_sessions",
+ ret = admin_socket->register_command("mds_sessions "
+ "name=cap_dump,type=CephBool,req=false",
&m_command_hook,
"show mds session state");
if (ret < 0) {
<< cpp_strerror(-ret) << dendl;
}
ret = admin_socket->register_command("dump_cache",
- "dump_cache",
&m_command_hook,
"show in-memory metadata cache contents");
if (ret < 0) {
<< cpp_strerror(-ret) << dendl;
}
ret = admin_socket->register_command("kick_stale_sessions",
- "kick_stale_sessions",
&m_command_hook,
"kick sessions that were remote reset");
if (ret < 0) {
<< cpp_strerror(-ret) << dendl;
}
ret = admin_socket->register_command("status",
- "status",
&m_command_hook,
"show overall client status");
if (ret < 0) {
<< cpp_strerror(-ret) << dendl;
}
- client_lock.Lock();
+ std::lock_guard l{client_lock};
initialized = true;
- client_lock.Unlock();
}
void Client::shutdown()
{
- ldout(cct, 1) << "shutdown" << dendl;
+ ldout(cct, 1) << __func__ << dendl;
// If we were not mounted, but were being used for sending
// MDS commands, we may have sessions that need closing.
- client_lock.Lock();
- _close_sessions();
- client_lock.Unlock();
-
- cct->_conf->remove_observer(this);
+ {
+ std::lock_guard l{client_lock};
+ _close_sessions();
+ }
+ cct->_conf.remove_observer(this);
- AdminSocket* admin_socket = cct->get_admin_socket();
- admin_socket->unregister_command("mds_requests");
- admin_socket->unregister_command("mds_sessions");
- admin_socket->unregister_command("dump_cache");
- admin_socket->unregister_command("kick_stale_sessions");
- admin_socket->unregister_command("status");
+ cct->get_admin_socket()->unregister_commands(&m_command_hook);
if (ino_invalidate_cb) {
ldout(cct, 10) << "shutdown stopping cache invalidator finisher" << dendl;
remount_finisher.stop();
}
- objectcacher->stop(); // outside of client_lock! this does a join.
-
- client_lock.Lock();
- assert(initialized);
- initialized = false;
- timer.shutdown();
- client_lock.Unlock();
+ if (ino_release_cb) {
+ ldout(cct, 10) << "shutdown stopping inode release finisher" << dendl;
+ async_ino_releasor.wait_for_empty();
+ async_ino_releasor.stop();
+ }
+ objectcacher->stop(); // outside of client_lock! this does a join.
+ {
+ std::lock_guard l{client_lock};
+ ceph_assert(initialized);
+ initialized = false;
+ timer.shutdown();
+ }
objecter_finisher.wait_for_empty();
objecter_finisher.stop();
void Client::trim_cache_for_reconnect(MetaSession *s)
{
mds_rank_t mds = s->mds_num;
- ldout(cct, 20) << "trim_cache_for_reconnect mds." << mds << dendl;
+ ldout(cct, 20) << __func__ << " mds." << mds << dendl;
int trimmed = 0;
list<Dentry*> skipped;
for(list<Dentry*>::iterator p = skipped.begin(); p != skipped.end(); ++p)
lru.lru_insert_mid(*p);
- ldout(cct, 20) << "trim_cache_for_reconnect mds." << mds
+ ldout(cct, 20) << __func__ << " mds." << mds
<< " trimmed " << trimmed << " dentries" << dendl;
if (s->caps.size() > 0)
void Client::trim_dentry(Dentry *dn)
{
ldout(cct, 15) << "trim_dentry unlinking dn " << dn->name
- << " in dir " << hex << dn->dir->parent_inode->ino
+ << " in dir "
+ << std::hex << dn->dir->parent_inode->ino << std::dec
<< dendl;
if (dn->inode) {
Inode *diri = dn->dir->parent_inode;
- diri->dir_release_count++;
clear_dir_complete_and_ordered(diri, true);
}
unlink(dn, false, false); // drop dir, drop dentry
bool was_new = false;
if (inode_map.count(st->vino)) {
in = inode_map[st->vino];
- ldout(cct, 12) << "add_update_inode had " << *in << " caps " << ccap_string(st->cap.caps) << dendl;
+ ldout(cct, 12) << __func__ << " had " << *in << " caps " << ccap_string(st->cap.caps) << dendl;
} else {
in = new Inode(this, st->vino, &st->layout);
inode_map[st->vino] = in;
if (!root) {
root = in;
+ if (use_faked_inos())
+ _assign_faked_root(root);
root_ancestor = in;
cwd = root;
} else if (!mounted) {
in->uid = st->uid;
in->gid = st->gid;
in->btime = st->btime;
+ in->snap_btime = st->snap_btime;
}
if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
ldout(cct, 20) << " dir hash is " << (int)in->dir_layout.dl_dir_hash << dendl;
in->rstat = st->rstat;
in->quota = st->quota;
+ in->dir_pin = st->dir_pin;
}
// move me if/when version reflects fragtree changes.
if (in->dirfragtree != st->dirfragtree) {
if ((in->xattr_version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
st->xattrbl.length() &&
st->xattr_version > in->xattr_version) {
- bufferlist::iterator p = st->xattrbl.begin();
- ::decode(in->xattrs, p);
+ auto p = st->xattrbl.cbegin();
+ decode(in->xattrs, p);
in->xattr_version = st->xattr_version;
}
return in; // as with readdir returning indoes in different snaprealms (no caps!)
if (in->snapid == CEPH_NOSNAP) {
- add_update_cap(in, session, st->cap.cap_id, st->cap.caps, st->cap.seq,
- st->cap.mseq, inodeno_t(st->cap.realm), st->cap.flags,
- request_perms);
+ add_update_cap(in, session, st->cap.cap_id, st->cap.caps, st->cap.wanted,
+ st->cap.seq, st->cap.mseq, inodeno_t(st->cap.realm),
+ st->cap.flags, request_perms);
if (in->auth_cap && in->auth_cap->session == session) {
in->max_size = st->max_size;
in->rstat = st->rstat;
if (dir->dentries.count(dname))
dn = dir->dentries[dname];
- ldout(cct, 12) << "insert_dentry_inode '" << dname << "' vino " << in->vino()
+ ldout(cct, 12) << __func__ << " '" << dname << "' vino " << in->vino()
<< " in dir " << dir->parent_inode->vino() << " dn " << dn
<< dendl;
if (old_dentry) {
if (old_dentry->dir != dir) {
Inode *old_diri = old_dentry->dir->parent_inode;
- old_diri->dir_ordered_count++;
clear_dir_complete_and_ordered(old_diri, false);
}
unlink(old_dentry, dir == old_dentry->dir, false); // drop dentry, keep dir open if its the same dir
}
Inode *diri = dir->parent_inode;
- diri->dir_ordered_count++;
clear_dir_complete_and_ordered(diri, false);
dn = link(dir, dname, in, dn);
}
utime_t dttl = from;
dttl += (float)dlease->duration_ms / 1000.0;
- assert(dn);
+ ceph_assert(dn);
- if (dlease->mask & CEPH_LOCK_DN) {
+ if (dlease->mask & CEPH_LEASE_VALID) {
if (dttl > dn->lease_ttl) {
ldout(cct, 10) << "got dentry lease on " << dn->name
<< " dur " << dlease->duration_ms << "ms ttl " << dttl << dendl;
}
}
dn->cap_shared_gen = dn->dir->parent_inode->shared_gen;
+ if (dlease->mask & CEPH_LEASE_PRIMARY_LINK)
+ dn->mark_primary();
}
// replicated
in->dir_replicated = !dst->dist.empty(); // FIXME that's just one frag!
-
- // dist
- /*
- if (!st->dirfrag_dist.empty()) { // FIXME
- set<int> dist = st->dirfrag_dist.begin()->second;
- if (dist.empty() && !in->dir_contacts.empty())
- ldout(cct, 9) << "lost dist spec for " << in->ino
- << " " << dist << dendl;
- if (!dist.empty() && in->dir_contacts.empty())
- ldout(cct, 9) << "got dist spec for " << in->ino
- << " " << dist << dendl;
- in->dir_contacts = dist;
- }
- */
}
void Client::clear_dir_complete_and_ordered(Inode *diri, bool complete)
{
+ if (complete)
+ diri->dir_release_count++;
+ else
+ diri->dir_ordered_count++;
if (diri->flags & I_COMPLETE) {
if (complete) {
ldout(cct, 10) << " clearing (I_COMPLETE|I_DIR_ORDERED) on " << *diri << dendl;
*/
void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, Inode *diri) {
- MClientReply *reply = request->reply;
+ auto& reply = request->reply;
ConnectionRef con = request->reply->get_connection();
- uint64_t features = con->get_features();
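+  // the legacy reply encoding is interpreted against the peer's feature
+  // bits, while the new encoding (CEPHFS_FEATURE_REPLY_ENCODING) is
+  // versioned and self-describing, so "all features" can be assumed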
+ uint64_t features;
+ if(session->mds_features.test(CEPHFS_FEATURE_REPLY_ENCODING)) {
+ features = (uint64_t)-1;
+ }
+ else {
+ features = con->get_features();
+ }
dir_result_t *dirp = request->dirp;
- assert(dirp);
+ ceph_assert(dirp);
// the extra buffer list is only set for readdir and lssnap replies
- bufferlist::iterator p = reply->get_extra_bl().begin();
+ auto p = reply->get_extra_bl().cbegin();
if (!p.end()) {
// snapdir?
if (request->head.op == CEPH_MDS_OP_LSSNAP) {
- assert(diri);
+ ceph_assert(diri);
diri = open_snapdir(diri);
}
// only open dir if we're actually adding stuff to it!
Dir *dir = diri->open_dir();
- assert(dir);
+ ceph_assert(dir);
// dirstat
- DirStat dst(p);
+ DirStat dst(p, features);
__u32 numdn;
__u16 flags;
- ::decode(numdn, p);
- ::decode(flags, p);
+ decode(numdn, p);
+ decode(flags, p);
bool end = ((unsigned)flags & CEPH_READDIR_FRAG_END);
bool hash_order = ((unsigned)flags & CEPH_READDIR_HASH_ORDER);
frag_t fg = (unsigned)request->head.args.readdir.frag;
unsigned readdir_offset = dirp->next_offset;
string readdir_start = dirp->last_name;
- assert(!readdir_start.empty() || readdir_offset == 2);
+ ceph_assert(!readdir_start.empty() || readdir_offset == 2);
unsigned last_hash = 0;
if (hash_order) {
string dname;
LeaseStat dlease;
for (unsigned i=0; i<numdn; i++) {
- ::decode(dname, p);
- ::decode(dlease, p);
+ decode(dname, p);
+ dlease.decode(p, features);
InodeStat ist(p, features);
ldout(cct, 15) << "" << i << ": '" << dname << "'" << dendl;
// replace incorrect dentry
unlink(olddn, true, true); // keep dir, dentry
dn = link(dir, dname, in, olddn);
- assert(dn == olddn);
+ ceph_assert(dn == olddn);
} else {
// keep existing dn
dn = olddn;
dirp->start_shared_gen == diri->shared_gen) {
if (dirp->cache_index == dir->readdir_cache.size()) {
if (i == 0) {
- assert(!dirp->inode->is_complete_and_ordered());
+ ceph_assert(!dirp->inode->is_complete_and_ordered());
dir->readdir_cache.reserve(dirp->cache_index + numdn);
}
dir->readdir_cache.push_back(dn);
} else if (dirp->cache_index < dir->readdir_cache.size()) {
if (dirp->inode->is_complete_and_ordered())
- assert(dir->readdir_cache[dirp->cache_index] == dn);
+ ceph_assert(dir->readdir_cache[dirp->cache_index] == dn);
else
dir->readdir_cache[dirp->cache_index] = dn;
} else {
- assert(0 == "unexpected readdir buffer idx");
+ ceph_abort_msg("unexpected readdir buffer idx");
}
dirp->cache_index++;
}
*/
Inode* Client::insert_trace(MetaRequest *request, MetaSession *session)
{
- MClientReply *reply = request->reply;
+ auto& reply = request->reply;
int op = request->get_op();
ldout(cct, 10) << "insert_trace from " << request->sent_stamp << " mds." << session->mds_num
<< " is_dentry=" << (int)reply->head.is_dentry
<< dendl;
- bufferlist::iterator p = reply->get_trace_bl().begin();
+ auto p = reply->get_trace_bl().cbegin();
if (request->got_unsafe) {
ldout(cct, 10) << "insert_trace -- already got unsafe; ignoring" << dendl;
- assert(p.end());
+ ceph_assert(p.end());
return NULL;
}
Dentry *d = request->dentry();
if (d) {
Inode *diri = d->dir->parent_inode;
- diri->dir_release_count++;
clear_dir_complete_and_ordered(diri, true);
}
// rename
Dentry *od = request->old_dentry();
ldout(cct, 10) << " unlinking rename src dn " << od << " for traceless reply" << dendl;
- assert(od);
+ ceph_assert(od);
unlink(od, true, true); // keep dir, dentry
} else if (op == CEPH_MDS_OP_RMDIR ||
op == CEPH_MDS_OP_UNLINK) {
}
ConnectionRef con = request->reply->get_connection();
- uint64_t features = con->get_features();
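+  // as in insert_readdir_results(): the new reply encoding is versioned,
+  // so the decoder no longer needs the connection's feature bits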
+ uint64_t features;
+ if (session->mds_features.test(CEPHFS_FEATURE_REPLY_ENCODING)) {
+ features = (uint64_t)-1;
+ }
+ else {
+ features = con->get_features();
+ }
ldout(cct, 10) << " features 0x" << hex << features << dec << dendl;
// snap trace
if (reply->head.is_dentry) {
dirst.decode(p, features);
- dst.decode(p);
- ::decode(dname, p);
- ::decode(dlease, p);
+ dst.decode(p, features);
+ decode(dname, p);
+ dlease.decode(p, features);
}
Inode *in = 0;
if ((wanted & CEPH_CAP_XATTR_SHARED) &&
!(ist.xattr_version > 0 && ist.xattrbl.length() > 0))
- assert(0 == "MDS reply does not contain xattrs");
+ ceph_abort_msg("MDS reply does not contain xattrs");
}
in = add_update_inode(&ist, request->sent_stamp, session,
if (diri->dir && diri->dir->dentries.count(dname)) {
dn = diri->dir->dentries[dname];
if (dn->inode) {
- diri->dir_ordered_count++;
clear_dir_complete_and_ordered(diri, false);
unlink(dn, true, true); // keep dir, dentry
}
// fake it for snap lookup
vinodeno_t vino = ist.vino;
vino.snapid = CEPH_SNAPDIR;
- assert(inode_map.count(vino));
+ ceph_assert(inode_map.count(vino));
diri = inode_map[vino];
string dname = request->path.last_dentry();
Inode *in = NULL;
Dentry *de = NULL;
- Cap *cap = NULL;
if (req->resend_mds >= 0) {
mds = req->resend_mds;
req->resend_mds = -1;
- ldout(cct, 10) << "choose_target_mds resend_mds specified as mds." << mds << dendl;
+ ldout(cct, 10) << __func__ << " resend_mds specified as mds." << mds << dendl;
goto out;
}
in = req->inode();
de = req->dentry();
if (in) {
- ldout(cct, 20) << "choose_target_mds starting with req->inode " << *in << dendl;
+ ldout(cct, 20) << __func__ << " starting with req->inode " << *in << dendl;
if (req->path.depth()) {
hash = in->hash_dentry_name(req->path[0]);
- ldout(cct, 20) << "choose_target_mds inode dir hash is " << (int)in->dir_layout.dl_dir_hash
+ ldout(cct, 20) << __func__ << " inode dir hash is " << (int)in->dir_layout.dl_dir_hash
<< " on " << req->path[0]
<< " => " << hash << dendl;
is_hash = true;
} else if (de) {
if (de->inode) {
in = de->inode.get();
- ldout(cct, 20) << "choose_target_mds starting with req->dentry inode " << *in << dendl;
+ ldout(cct, 20) << __func__ << " starting with req->dentry inode " << *in << dendl;
} else {
in = de->dir->parent_inode;
hash = in->hash_dentry_name(de->name);
- ldout(cct, 20) << "choose_target_mds dentry dir hash is " << (int)in->dir_layout.dl_dir_hash
+ ldout(cct, 20) << __func__ << " dentry dir hash is " << (int)in->dir_layout.dl_dir_hash
<< " on " << de->name
<< " => " << hash << dendl;
is_hash = true;
}
if (in) {
if (in->snapid != CEPH_NOSNAP) {
- ldout(cct, 10) << "choose_target_mds " << *in << " is snapped, using nonsnap parent" << dendl;
+ ldout(cct, 10) << __func__ << " " << *in << " is snapped, using nonsnap parent" << dendl;
while (in->snapid != CEPH_NOSNAP) {
if (in->snapid == CEPH_SNAPDIR)
in = in->snapdir_parent.get();
- else if (!in->dn_set.empty())
+ else if (!in->dentries.empty())
/* In most cases there will only be one dentry, so getting it
* will be the correct action. If there are multiple hard links,
* I think the MDS should be able to redirect as needed*/
is_hash = false;
}
- ldout(cct, 20) << "choose_target_mds " << *in << " is_hash=" << is_hash
+ ldout(cct, 20) << __func__ << " " << *in << " is_hash=" << is_hash
<< " hash=" << hash << dendl;
if (is_hash && S_ISDIR(in->mode) && !in->fragmap.empty()) {
mds = in->auth_cap->session->mds_num;
}
if (mds >= 0) {
- ldout(cct, 10) << "choose_target_mds from dirfragtree hash" << dendl;
+ ldout(cct, 10) << __func__ << " from dirfragtree hash" << dendl;
goto out;
}
}
- if (req->auth_is_best())
- cap = in->auth_cap;
- if (!cap && !in->caps.empty())
- cap = in->caps.begin()->second;
- if (!cap)
+ if (in->auth_cap && req->auth_is_best()) {
+ mds = in->auth_cap->session->mds_num;
+ } else if (!in->caps.empty()) {
+ mds = in->caps.begin()->second.session->mds_num;
+ } else {
goto random_mds;
- mds = cap->session->mds_num;
- ldout(cct, 10) << "choose_target_mds from caps on inode " << *in << dendl;
+ }
+ ldout(cct, 10) << __func__ << " from caps on inode " << *in << dendl;
goto out;
}
void Client::connect_mds_targets(mds_rank_t mds)
{
- ldout(cct, 10) << "connect_mds_targets for mds." << mds << dendl;
- assert(mds_sessions.count(mds));
+ ldout(cct, 10) << __func__ << " for mds." << mds << dendl;
+ ceph_assert(mds_sessions.count(mds));
const MDSMap::mds_info_t& info = mdsmap->get_mds_info(mds);
for (set<mds_rank_t>::const_iterator q = info.export_targets.begin();
q != info.export_targets.end();
}
}
-void Client::dump_mds_sessions(Formatter *f)
+void Client::dump_mds_sessions(Formatter *f, bool cap_dump)
{
f->dump_int("id", get_nodeid().v);
- entity_inst_t inst(messenger->get_myname(), messenger->get_myaddr());
+ entity_inst_t inst(messenger->get_myname(), messenger->get_myaddr_legacy());
f->dump_object("inst", inst);
f->dump_stream("inst_str") << inst;
f->dump_stream("addr_str") << inst.addr;
f->open_array_section("sessions");
- for (map<mds_rank_t,MetaSession*>::const_iterator p = mds_sessions.begin(); p != mds_sessions.end(); ++p) {
+ for (const auto &p : mds_sessions) {
f->open_object_section("session");
- p->second->dump(f);
+ p.second.dump(f, cap_dump);
f->close_section();
}
f->close_section();
}
}
-int Client::verify_reply_trace(int r,
- MetaRequest *request, MClientReply *reply,
+int Client::verify_reply_trace(int r, MetaSession *session,
+ MetaRequest *request, const MConstRef<MClientReply>& reply,
InodeRef *ptarget, bool *pcreated,
const UserPerm& perms)
{
bool got_created_ino = false;
ceph::unordered_map<vinodeno_t, Inode*>::iterator p;
- extra_bl.claim(reply->get_extra_bl());
+ extra_bl = reply->get_extra_bl();
if (extra_bl.length() >= 8) {
- // if the extra bufferlist has a buffer, we assume its the created inode
- // and that this request to create succeeded in actually creating
- // the inode (won the race with other create requests)
- ::decode(created_ino, extra_bl);
- got_created_ino = true;
+ if (session->mds_features.test(CEPHFS_FEATURE_DELEG_INO)) {
+ struct openc_response_t ocres;
+
+ decode(ocres, extra_bl);
+ created_ino = ocres.created_ino;
+ /*
+ * The userland cephfs client doesn't have a way to do an async create
+ * (yet), so just discard delegated_inos for now. Eventually we should
+ * store them and use them in create calls, even if they are synchronous,
+ * if only for testing purposes.
+ */
+ ldout(cct, 10) << "delegated_inos: " << ocres.delegated_inos << dendl;
+ } else {
+      // a bare u64 holding the created inode number
+ decode(created_ino, extra_bl);
+ }
ldout(cct, 10) << "make_request created ino " << created_ino << dendl;
+ got_created_ino = true;
}
if (pcreated)
&target, perms);
} else {
// if the dentry is not linked, just do our best. see #5021.
- assert(0 == "how did this happen? i want logs!");
+ ceph_abort_msg("how did this happen? i want logs!");
}
} else {
Inode *in = request->inode();
if (use_mds >= 0)
request->resend_mds = use_mds;
+ MetaSession *session = NULL;
while (1) {
if (request->aborted())
break;
}
// set up wait cond
- Cond caller_cond;
+ ceph::condition_variable caller_cond;
request->caller_cond = &caller_cond;
// choose mds
}
// open a session?
- MetaSession *session = NULL;
if (!have_open_session(mds)) {
session = _get_or_open_mds_session(mds);
-
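+    // a session an MDS has rejected stays in STATE_REJECTED; don't keep
+    // retrying against it, fail the request with EPERM instead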
+ if (session->state == MetaSession::STATE_REJECTED) {
+ request->abort(-EPERM);
+ break;
+ }
// wait
if (session->state == MetaSession::STATE_OPENING) {
ldout(cct, 10) << "waiting for session to mds." << mds << " to open" << dendl;
wait_on_context_list(session->waiting_for_open);
- // Abort requests on REJECT from MDS
- if (rejected_by_mds.count(mds)) {
- request->abort(-EPERM);
- break;
- }
continue;
}
if (!have_open_session(mds))
continue;
} else {
- session = mds_sessions[mds];
+ session = &mds_sessions.at(mds);
}
// send request.
// wait for signal
ldout(cct, 20) << "awaiting reply|forward|kick on " << &caller_cond << dendl;
request->kick = false;
- while (!request->reply && // reply
- request->resend_mds < 0 && // forward
- !request->kick)
- caller_cond.Wait(client_lock);
- request->caller_cond = NULL;
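+    // client_lock is already held: adopt it into a unique_lock for the
+    // predicate wait, then release() ownership so the guard doesn't
+    // unlock it on scope exit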
+ std::unique_lock l{client_lock, std::adopt_lock};
+ caller_cond.wait(l, [request] {
+ return (request->reply || // reply
+ request->resend_mds >= 0 || // forward
+ request->kick);
+ });
+ l.release();
+ request->caller_cond = nullptr;
// did we get a reply?
if (request->reply)
}
if (!request->reply) {
- assert(request->aborted());
- assert(!request->got_unsafe);
+ ceph_assert(request->aborted());
+ ceph_assert(!request->got_unsafe);
r = request->get_abort_code();
request->item.remove_myself();
unregister_request(request);
- put_request(request); // ours
+ put_request(request);
return r;
}
// got it!
- MClientReply *reply = request->reply;
- request->reply = NULL;
+ auto reply = std::move(request->reply);
r = reply->get_result();
if (r >= 0)
request->success = true;
// kick dispatcher (we've got it!)
- assert(request->dispatch_cond);
- request->dispatch_cond->Signal();
+ ceph_assert(request->dispatch_cond);
+ request->dispatch_cond->notify_all();
ldout(cct, 20) << "sendrecv kickback on tid " << tid << " " << request->dispatch_cond << dendl;
request->dispatch_cond = 0;
if (r >= 0 && ptarget)
- r = verify_reply_trace(r, request, reply, ptarget, pcreated, perms);
+ r = verify_reply_trace(r, session, request, reply, ptarget, pcreated, perms);
if (pdirbl)
- pdirbl->claim(reply->get_extra_bl());
+ *pdirbl = reply->get_extra_bl();
// -- log times --
utime_t lat = ceph_clock_now();
logger->tinc(l_c_reply, lat);
put_request(request);
-
- reply->put();
return r;
}
mds_rank_t mds, int drop,
int unless, int force)
{
- ldout(cct, 20) << "encode_inode_release enter(in:" << *in << ", req:" << req
+ ldout(cct, 20) << __func__ << " enter(in:" << *in << ", req:" << req
<< " mds:" << mds << ", drop:" << drop << ", unless:" << unless
- << ", have:" << ", force:" << force << ")" << dendl;
+ << ", force:" << force << ")" << dendl;
int released = 0;
- if (in->caps.count(mds)) {
- Cap *caps = in->caps[mds];
+ auto it = in->caps.find(mds);
+ if (it != in->caps.end()) {
+ Cap &cap = it->second;
drop &= ~(in->dirty_caps | get_caps_used(in));
- if ((drop & caps->issued) &&
- !(unless & caps->issued)) {
- ldout(cct, 25) << "Dropping caps. Initial " << ccap_string(caps->issued) << dendl;
- caps->issued &= ~drop;
- caps->implemented &= ~drop;
+ if ((drop & cap.issued) &&
+ !(unless & cap.issued)) {
+ ldout(cct, 25) << "dropping caps " << ccap_string(drop) << dendl;
+ cap.issued &= ~drop;
+ cap.implemented &= ~drop;
released = 1;
- ldout(cct, 25) << "Now have: " << ccap_string(caps->issued) << dendl;
} else {
released = force;
}
if (released) {
+ cap.wanted = in->caps_wanted();
+ if (&cap == in->auth_cap &&
+ !(cap.wanted & CEPH_CAP_ANY_FILE_WR)) {
+ in->requested_max_size = 0;
+ ldout(cct, 25) << "reset requested_max_size due to not wanting any file write cap" << dendl;
+ }
ceph_mds_request_release rel;
rel.ino = in->ino;
- rel.cap_id = caps->cap_id;
- rel.seq = caps->seq;
- rel.issue_seq = caps->issue_seq;
- rel.mseq = caps->mseq;
- rel.caps = caps->implemented;
- rel.wanted = caps->wanted;
+ rel.cap_id = cap.cap_id;
+ rel.seq = cap.seq;
+ rel.issue_seq = cap.issue_seq;
+ rel.mseq = cap.mseq;
+ rel.caps = cap.implemented;
+ rel.wanted = cap.wanted;
rel.dname_len = 0;
rel.dname_seq = 0;
req->cap_releases.push_back(MClientRequest::Release(rel,""));
}
}
- ldout(cct, 25) << "encode_inode_release exit(in:" << *in << ") released:"
+ ldout(cct, 25) << __func__ << " exit(in:" << *in << ") released:"
<< released << dendl;
return released;
}
void Client::encode_dentry_release(Dentry *dn, MetaRequest *req,
mds_rank_t mds, int drop, int unless)
{
- ldout(cct, 20) << "encode_dentry_release enter(dn:"
+ ldout(cct, 20) << __func__ << " enter(dn:"
<< dn << ")" << dendl;
int released = 0;
if (dn->dir)
mds, drop, unless, 1);
if (released && dn->lease_mds == mds) {
ldout(cct, 25) << "preemptively releasing dn to mds" << dendl;
- MClientRequest::Release& rel = req->cap_releases.back();
+ auto& rel = req->cap_releases.back();
rel.item.dname_len = dn->name.length();
rel.item.dname_seq = dn->lease_seq;
rel.dname = dn->name;
+ dn->lease_mds = -1;
}
- ldout(cct, 25) << "encode_dentry_release exit(dn:"
+ ldout(cct, 25) << __func__ << " exit(dn:"
<< dn << ")" << dendl;
}
*/
void Client::encode_cap_releases(MetaRequest *req, mds_rank_t mds)
{
- ldout(cct, 20) << "encode_cap_releases enter (req: "
+ ldout(cct, 20) << __func__ << " enter (req: "
<< req << ", mds: " << mds << ")" << dendl;
if (req->inode_drop && req->inode())
encode_inode_release(req->inode(), req,
encode_dentry_release(req->old_dentry(), req,
mds, req->old_dentry_drop,
req->old_dentry_unless);
- ldout(cct, 25) << "encode_cap_releases exit (req: "
+ ldout(cct, 25) << __func__ << " exit (req: "
<< req << ", mds " << mds <<dendl;
}
bool Client::have_open_session(mds_rank_t mds)
{
- return
- mds_sessions.count(mds) &&
- (mds_sessions[mds]->state == MetaSession::STATE_OPEN ||
- mds_sessions[mds]->state == MetaSession::STATE_STALE);
+ const auto &it = mds_sessions.find(mds);
+ return it != mds_sessions.end() &&
+ (it->second.state == MetaSession::STATE_OPEN ||
+ it->second.state == MetaSession::STATE_STALE);
}
MetaSession *Client::_get_mds_session(mds_rank_t mds, Connection *con)
{
- if (mds_sessions.count(mds) == 0)
+ const auto &it = mds_sessions.find(mds);
+ if (it == mds_sessions.end() || it->second.con != con) {
return NULL;
- MetaSession *s = mds_sessions[mds];
- if (s->con != con)
- return NULL;
- return s;
+ } else {
+ return &it->second;
+ }
}
MetaSession *Client::_get_or_open_mds_session(mds_rank_t mds)
{
- if (mds_sessions.count(mds))
- return mds_sessions[mds];
- return _open_mds_session(mds);
+ auto it = mds_sessions.find(mds);
+ return it == mds_sessions.end() ? _open_mds_session(mds) : &it->second;
}
/**
*/
void Client::update_metadata(std::string const &k, std::string const &v)
{
- Mutex::Locker l(client_lock);
- assert(initialized);
+ std::lock_guard l(client_lock);
+ ceph_assert(initialized);
- if (metadata.count(k)) {
+ auto it = metadata.find(k);
+ if (it != metadata.end()) {
ldout(cct, 1) << __func__ << " warning, overriding metadata field '" << k
- << "' from '" << metadata[k] << "' to '" << v << "'" << dendl;
+ << "' from '" << it->second << "' to '" << v << "'" << dendl;
}
metadata[k] = v;
MetaSession *Client::_open_mds_session(mds_rank_t mds)
{
- ldout(cct, 10) << "_open_mds_session mds." << mds << dendl;
- assert(mds_sessions.count(mds) == 0);
- MetaSession *session = new MetaSession;
- session->mds_num = mds;
- session->seq = 0;
- session->inst = mdsmap->get_inst(mds);
- session->con = messenger->get_connection(session->inst);
- session->state = MetaSession::STATE_OPENING;
- session->mds_state = MDSMap::STATE_NULL;
- mds_sessions[mds] = session;
-
- // Maybe skip sending a request to open if this MDS daemon
- // has previously sent us a REJECT.
- if (rejected_by_mds.count(mds)) {
- if (rejected_by_mds[mds] == session->inst) {
- ldout(cct, 4) << "_open_mds_session mds." << mds << " skipping "
- "because we were rejected" << dendl;
- return session;
- } else {
- ldout(cct, 4) << "_open_mds_session mds." << mds << " old inst "
- "rejected us, trying with new inst" << dendl;
- rejected_by_mds.erase(mds);
- }
- }
-
- MClientSession *m = new MClientSession(CEPH_SESSION_REQUEST_OPEN);
- m->client_meta = metadata;
- session->con->send_message(m);
+ ldout(cct, 10) << __func__ << " mds." << mds << dendl;
+ auto addrs = mdsmap->get_addrs(mds);
+ auto em = mds_sessions.emplace(std::piecewise_construct,
+ std::forward_as_tuple(mds),
+ std::forward_as_tuple(mds, messenger->connect_to_mds(addrs), addrs));
+ ceph_assert(em.second); /* not already present */
+ MetaSession *session = &em.first->second;
+
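+  // advertise the features this client supports; the MDS checks them
+  // against its required set when deciding to accept the session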
+ auto m = make_message<MClientSession>(CEPH_SESSION_REQUEST_OPEN);
+ m->metadata = metadata;
+ m->supported_features = feature_bitset_t(CEPHFS_FEATURES_CLIENT_SUPPORTED);
+ session->con->send_message2(std::move(m));
return session;
}
void Client::_close_mds_session(MetaSession *s)
{
- ldout(cct, 2) << "_close_mds_session mds." << s->mds_num << " seq " << s->seq << dendl;
+ ldout(cct, 2) << __func__ << " mds." << s->mds_num << " seq " << s->seq << dendl;
s->state = MetaSession::STATE_CLOSING;
- s->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+ s->con->send_message2(make_message<MClientSession>(CEPH_SESSION_REQUEST_CLOSE, s->seq));
}
-void Client::_closed_mds_session(MetaSession *s)
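+// Marks a session closed. With rejected=true the session is parked in
+// STATE_REJECTED (and kept in the map) so we don't retry against an MDS
+// that refused us; err, when nonzero, is propagated to the session's caps.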
+void Client::_closed_mds_session(MetaSession *s, int err, bool rejected)
{
- s->state = MetaSession::STATE_CLOSED;
+ ldout(cct, 5) << __func__ << " mds." << s->mds_num << " seq " << s->seq << dendl;
+ if (rejected && s->state != MetaSession::STATE_CLOSING)
+ s->state = MetaSession::STATE_REJECTED;
+ else
+ s->state = MetaSession::STATE_CLOSED;
s->con->mark_down();
signal_context_list(s->waiting_for_open);
- mount_cond.Signal();
- remove_session_caps(s);
+ mount_cond.notify_all();
+ remove_session_caps(s, err);
kick_requests_closed(s);
- mds_sessions.erase(s->mds_num);
- delete s;
+ mds_ranks_closing.erase(s->mds_num);
+ if (s->state == MetaSession::STATE_CLOSED)
+ mds_sessions.erase(s->mds_num);
}
-void Client::handle_client_session(MClientSession *m)
+void Client::handle_client_session(const MConstRef<MClientSession>& m)
{
mds_rank_t from = mds_rank_t(m->get_source().num());
- ldout(cct, 10) << "handle_client_session " << *m << " from mds." << from << dendl;
+ ldout(cct, 10) << __func__ << " " << *m << " from mds." << from << dendl;
MetaSession *session = _get_mds_session(from, m->get_connection().get());
if (!session) {
ldout(cct, 10) << " discarding session message from sessionless mds " << m->get_source_inst() << dendl;
- m->put();
return;
}
switch (m->get_op()) {
case CEPH_SESSION_OPEN:
- renew_caps(session);
- session->state = MetaSession::STATE_OPEN;
- if (unmounting)
- mount_cond.Signal();
- else
- connect_mds_targets(from);
- signal_context_list(session->waiting_for_open);
- break;
+ {
+ feature_bitset_t missing_features(CEPHFS_FEATURES_CLIENT_REQUIRED);
+ missing_features -= m->supported_features;
+ if (!missing_features.empty()) {
+ lderr(cct) << "mds." << from << " lacks required features '"
+ << missing_features << "', closing session " << dendl;
+ _close_mds_session(session);
+ _closed_mds_session(session, -EPERM, true);
+ break;
+ }
+ session->mds_features = std::move(m->supported_features);
+
+ renew_caps(session);
+ session->state = MetaSession::STATE_OPEN;
+ if (unmounting)
+ mount_cond.notify_all();
+ else
+ connect_mds_targets(from);
+ signal_context_list(session->waiting_for_open);
+ break;
+ }
case CEPH_SESSION_CLOSE:
_closed_mds_session(session);
case CEPH_SESSION_RENEWCAPS:
if (session->cap_renew_seq == m->get_seq()) {
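+      // wake cap waiters only if this renewal actually revived a stale
+      // session; otherwise nothing was blocked on the ttl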
+ bool was_stale = ceph_clock_now() >= session->cap_ttl;
session->cap_ttl =
session->last_cap_renew_request + mdsmap->get_session_timeout();
- wake_inode_waiters(session);
+ if (was_stale)
+ wake_up_session_caps(session, false);
}
break;
break;
case CEPH_SESSION_FLUSHMSG:
- session->con->send_message(new MClientSession(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
+ /* flush cap release */
+ if (auto& m = session->release; m) {
+ session->con->send_message2(std::move(m));
+ }
+ session->con->send_message2(make_message<MClientSession>(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
break;
case CEPH_SESSION_FORCE_RO:
break;
case CEPH_SESSION_REJECT:
- rejected_by_mds[session->mds_num] = session->inst;
- _closed_mds_session(session);
+ {
+ std::string_view error_str;
+ auto it = m->metadata.find("error_string");
+ if (it != m->metadata.end())
+ error_str = it->second;
+ else
+ error_str = "unknown error";
+ lderr(cct) << "mds." << from << " rejected us (" << error_str << ")" << dendl;
+ _closed_mds_session(session, -EPERM, true);
+ }
break;
default:
ceph_abort();
}
-
- m->put();
}
bool Client::_any_stale_sessions() const
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
- for (const auto &i : mds_sessions) {
- if (i.second->state == MetaSession::STATE_STALE) {
+ for (const auto &p : mds_sessions) {
+ if (p.second.state == MetaSession::STATE_STALE) {
return true;
}
}
void Client::_kick_stale_sessions()
{
- ldout(cct, 1) << "kick_stale_sessions" << dendl;
+ ldout(cct, 1) << __func__ << dendl;
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end(); ) {
- MetaSession *s = p->second;
- ++p;
- if (s->state == MetaSession::STATE_STALE)
- _closed_mds_session(s);
+ for (auto it = mds_sessions.begin(); it != mds_sessions.end(); ) {
+ MetaSession &s = it->second;
+ if (s.state == MetaSession::STATE_REJECTED) {
+ mds_sessions.erase(it++);
+ continue;
+ }
+ ++it;
+ if (s.state == MetaSession::STATE_STALE)
+ _closed_mds_session(&s);
}
}
{
// make the request
mds_rank_t mds = session->mds_num;
- ldout(cct, 10) << "send_request rebuilding request " << request->get_tid()
+ ldout(cct, 10) << __func__ << " rebuilding request " << request->get_tid()
<< " for mds." << mds << dendl;
- MClientRequest *r = build_client_request(request);
+ auto r = build_client_request(request);
if (request->dentry()) {
r->set_dentry_wanted();
}
if (request->mds == -1) {
request->sent_stamp = ceph_clock_now();
- ldout(cct, 20) << "send_request set sent_stamp to " << request->sent_stamp << dendl;
+ ldout(cct, 20) << __func__ << " set sent_stamp to " << request->sent_stamp << dendl;
}
request->mds = mds;
Inode *in = request->inode();
- if (in && in->caps.count(mds))
- request->sent_on_mseq = in->caps[mds]->mseq;
+ if (in) {
+ auto it = in->caps.find(mds);
+ if (it != in->caps.end()) {
+ request->sent_on_mseq = it->second.mseq;
+ }
+ }
session->requests.push_back(&request->item);
- ldout(cct, 10) << "send_request " << *r << " to mds." << mds << dendl;
- session->con->send_message(r);
+ ldout(cct, 10) << __func__ << " " << *r << " to mds." << mds << dendl;
+ session->con->send_message2(std::move(r));
}
-MClientRequest* Client::build_client_request(MetaRequest *request)
+ref_t<MClientRequest> Client::build_client_request(MetaRequest *request)
{
- MClientRequest *req = new MClientRequest(request->get_op());
+ auto req = make_message<MClientRequest>(request->get_op());
req->set_tid(request->tid);
req->set_stamp(request->op_stamp);
memcpy(&req->head, &request->head, sizeof(ceph_mds_request_head));
-void Client::handle_client_request_forward(MClientRequestForward *fwd)
+void Client::handle_client_request_forward(const MConstRef<MClientRequestForward>& fwd)
{
mds_rank_t mds = mds_rank_t(fwd->get_source().num());
MetaSession *session = _get_mds_session(mds, fwd->get_connection().get());
if (!session) {
- fwd->put();
return;
}
ceph_tid_t tid = fwd->get_tid();
if (mds_requests.count(tid) == 0) {
- ldout(cct, 10) << "handle_client_request_forward no pending request on tid " << tid << dendl;
- fwd->put();
+ ldout(cct, 10) << __func__ << " no pending request on tid " << tid << dendl;
return;
}
MetaRequest *request = mds_requests[tid];
- assert(request);
+ ceph_assert(request);
// reset retry counter
request->retry_attempt = 0;
// request not forwarded, or dest mds has no session.
// resend.
- ldout(cct, 10) << "handle_client_request tid " << tid
+ ldout(cct, 10) << __func__ << " tid " << tid
<< " fwd " << fwd->get_num_fwd()
<< " to mds." << fwd->get_dest_mds()
<< ", resending to " << fwd->get_dest_mds()
request->item.remove_myself();
request->num_fwd = fwd->get_num_fwd();
request->resend_mds = fwd->get_dest_mds();
- request->caller_cond->Signal();
-
- fwd->put();
+ request->caller_cond->notify_all();
}
bool Client::is_dir_operation(MetaRequest *req)
return false;
}
-void Client::handle_client_reply(MClientReply *reply)
+void Client::handle_client_reply(const MConstRef<MClientReply>& reply)
{
mds_rank_t mds_num = mds_rank_t(reply->get_source().num());
MetaSession *session = _get_mds_session(mds_num, reply->get_connection().get());
if (!session) {
- reply->put();
return;
}
bool is_safe = reply->is_safe();
if (mds_requests.count(tid) == 0) {
- lderr(cct) << "handle_client_reply no pending request on tid " << tid
+ lderr(cct) << __func__ << " no pending request on tid " << tid
<< " safe is:" << is_safe << dendl;
- reply->put();
return;
}
MetaRequest *request = mds_requests.at(tid);
- ldout(cct, 20) << "handle_client_reply got a reply. Safe:" << is_safe
+ ldout(cct, 20) << __func__ << " got a reply. Safe:" << is_safe
<< " tid " << tid << dendl;
if (request->got_unsafe && !is_safe) {
//duplicate response
ldout(cct, 0) << "got a duplicate reply on tid " << tid << " from mds "
<< mds_num << " safe:" << is_safe << dendl;
- reply->put();
return;
}
request->send_to_auth = true;
request->resend_mds = choose_target_mds(request);
Inode *in = request->inode();
+ std::map<mds_rank_t, Cap>::const_iterator it;
if (request->resend_mds >= 0 &&
request->resend_mds == request->mds &&
(in == NULL ||
- in->caps.count(request->resend_mds) == 0 ||
- request->sent_on_mseq == in->caps[request->resend_mds]->mseq)) {
- // have to return ESTALE
+	 (it = in->caps.find(request->resend_mds)) == in->caps.end() ||
+ request->sent_on_mseq == it->second.mseq)) {
+ ldout(cct, 20) << "have to return ESTALE" << dendl;
} else {
- request->caller_cond->Signal();
- reply->put();
+ request->caller_cond->notify_all();
return;
}
- ldout(cct, 20) << "have to return ESTALE" << dendl;
}
- assert(request->reply == NULL);
+ ceph_assert(!request->reply);
request->reply = reply;
insert_trace(request, session);
session->unsafe_requests.push_back(&request->unsafe_item);
if (is_dir_operation(request)) {
Inode *dir = request->inode();
- assert(dir);
+ ceph_assert(dir);
dir->unsafe_ops.push_back(&request->unsafe_dir_item);
}
if (request->target) {
// Only signal the caller once (on the first reply):
// Either its an unsafe reply, or its a safe reply and no unsafe reply was sent.
if (!is_safe || !request->got_unsafe) {
- Cond cond;
+ ceph::condition_variable cond;
request->dispatch_cond = &cond;
// wake up waiter
- ldout(cct, 20) << "handle_client_reply signalling caller " << (void*)request->caller_cond << dendl;
- request->caller_cond->Signal();
+ ldout(cct, 20) << __func__ << " signalling caller " << (void*)request->caller_cond << dendl;
+ request->caller_cond->notify_all();
// wake for kick back
- while (request->dispatch_cond) {
- ldout(cct, 20) << "handle_client_reply awaiting kickback on tid " << tid << " " << &cond << dendl;
- cond.Wait(client_lock);
- }
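+    // same adopt/release pattern as in make_request(): wait on the
+    // already-held client_lock without letting the unique_lock drop it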
+ std::unique_lock l{client_lock, std::adopt_lock};
+ cond.wait(l, [tid, request, &cond, this] {
+ if (request->dispatch_cond) {
+ ldout(cct, 20) << "handle_client_reply awaiting kickback on tid "
+ << tid << " " << &cond << dendl;
+ }
+ return !request->dispatch_cond;
+ });
+ l.release();
}
if (is_safe) {
unregister_request(request);
}
if (unmounting)
- mount_cond.Signal();
+ mount_cond.notify_all();
}
void Client::_handle_full_flag(int64_t pool)
}
}
-void Client::handle_osd_map(MOSDMap *m)
+void Client::handle_osd_map(const MConstRef<MOSDMap>& m)
{
std::set<entity_addr_t> new_blacklists;
objecter->consume_blacklist_events(&new_blacklists);
- const auto myaddr = messenger->get_myaddr();
- if (!blacklisted && new_blacklists.count(myaddr)) {
+ const auto myaddrs = messenger->get_myaddrs();
+ bool new_blacklist = false;
+ bool prenautilus = objecter->with_osdmap(
+ [&](const OSDMap& o) {
+ return o.require_osd_release < ceph_release_t::nautilus;
+ });
+ if (!blacklisted) {
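+    // compare each of our addresses against the new blacklist entries,
+    // in both the nautilus (TYPE_ANY) and legacy address forms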
+ for (auto a : myaddrs.v) {
+ // blacklist entries are always TYPE_ANY for nautilus+
+ a.set_type(entity_addr_t::TYPE_ANY);
+ if (new_blacklists.count(a)) {
+ new_blacklist = true;
+ break;
+ }
+ if (prenautilus) {
+ // ...except pre-nautilus, they were TYPE_LEGACY
+ a.set_type(entity_addr_t::TYPE_LEGACY);
+ if (new_blacklists.count(a)) {
+ new_blacklist = true;
+ break;
+ }
+ }
+ }
+ }
+ if (new_blacklist) {
auto epoch = objecter->with_osdmap([](const OSDMap &o){
return o.get_epoch();
});
lderr(cct) << "I was blacklisted at osd epoch " << epoch << dendl;
blacklisted = true;
- for (std::map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.begin();
- p != mds_requests.end(); ) {
- auto req = p->second;
- ++p;
- req->abort(-EBLACKLISTED);
- if (req->caller_cond) {
- req->kick = true;
- req->caller_cond->Signal();
- }
- }
- // Progress aborts on any requests that were on this waitlist. Any
- // requests that were on a waiting_for_open session waitlist
- // will get kicked during close session below.
- signal_cond_list(waiting_for_mdsmap);
-
- // Force-close all sessions: assume this is not abandoning any state
- // on the MDS side because the MDS will have seen the blacklist too.
- while(!mds_sessions.empty()) {
- auto i = mds_sessions.begin();
- auto session = i->second;
- _closed_mds_session(session);
- }
+ _abort_mds_sessions(-EBLACKLISTED);
    // Since we know all our OSD ops will fail, cancel them all preemptively,
// so that on an unhealthy cluster we can umount promptly even if e.g.
} else if (blacklisted) {
// Handle case where we were blacklisted but no longer are
- blacklisted = objecter->with_osdmap([myaddr](const OSDMap &o){
- return o.is_blacklisted(myaddr);});
+ blacklisted = objecter->with_osdmap([myaddrs](const OSDMap &o){
+ return o.is_blacklisted(myaddrs);});
+ }
+
+ // Always subscribe to next osdmap for blacklisted client
+ // until this client is not blacklisted.
+ if (blacklisted) {
+ objecter->maybe_request_map();
}
if (objecter->osdmap_full_flag()) {
objecter->maybe_request_map();
}
}
-
- m->put();
}
// incoming messages
-bool Client::ms_dispatch(Message *m)
+bool Client::ms_dispatch2(const MessageRef &m)
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
if (!initialized) {
ldout(cct, 10) << "inactive, discarding " << *m << dendl;
- m->put();
return true;
}
switch (m->get_type()) {
// mounting and mds sessions
case CEPH_MSG_MDS_MAP:
- handle_mds_map(static_cast<MMDSMap*>(m));
+ handle_mds_map(ref_cast<MMDSMap>(m));
break;
case CEPH_MSG_FS_MAP:
- handle_fs_map(static_cast<MFSMap*>(m));
+ handle_fs_map(ref_cast<MFSMap>(m));
break;
case CEPH_MSG_FS_MAP_USER:
- handle_fs_map_user(static_cast<MFSMapUser*>(m));
+ handle_fs_map_user(ref_cast<MFSMapUser>(m));
break;
case CEPH_MSG_CLIENT_SESSION:
- handle_client_session(static_cast<MClientSession*>(m));
+ handle_client_session(ref_cast<MClientSession>(m));
break;
case CEPH_MSG_OSD_MAP:
- handle_osd_map(static_cast<MOSDMap*>(m));
+ handle_osd_map(ref_cast<MOSDMap>(m));
break;
// requests
case CEPH_MSG_CLIENT_REQUEST_FORWARD:
- handle_client_request_forward(static_cast<MClientRequestForward*>(m));
+ handle_client_request_forward(ref_cast<MClientRequestForward>(m));
break;
case CEPH_MSG_CLIENT_REPLY:
- handle_client_reply(static_cast<MClientReply*>(m));
+ handle_client_reply(ref_cast<MClientReply>(m));
+ break;
+
+ // reclaim reply
+ case CEPH_MSG_CLIENT_RECLAIM_REPLY:
+ handle_client_reclaim_reply(ref_cast<MClientReclaimReply>(m));
break;
case CEPH_MSG_CLIENT_SNAP:
- handle_snap(static_cast<MClientSnap*>(m));
+ handle_snap(ref_cast<MClientSnap>(m));
break;
case CEPH_MSG_CLIENT_CAPS:
- handle_caps(static_cast<MClientCaps*>(m));
+ handle_caps(ref_cast<MClientCaps>(m));
break;
case CEPH_MSG_CLIENT_LEASE:
- handle_lease(static_cast<MClientLease*>(m));
+ handle_lease(ref_cast<MClientLease>(m));
break;
case MSG_COMMAND_REPLY:
if (m->get_source().type() == CEPH_ENTITY_TYPE_MDS) {
- handle_command_reply(static_cast<MCommandReply*>(m));
+ handle_command_reply(ref_cast<MCommandReply>(m));
} else {
return false;
}
break;
case CEPH_MSG_CLIENT_QUOTA:
- handle_quota(static_cast<MClientQuota*>(m));
+ handle_quota(ref_cast<MClientQuota>(m));
break;
default:
trim_cache();
if (size < lru.lru_get_size() + inode_map.size()) {
ldout(cct, 10) << "unmounting: trim pass, cache shrank, poking unmount()" << dendl;
- mount_cond.Signal();
+ mount_cond.notify_all();
} else {
ldout(cct, 10) << "unmounting: trim pass, size still " << lru.lru_get_size()
<< "+" << inode_map.size() << dendl;
return true;
}
-void Client::handle_fs_map(MFSMap *m)
+void Client::handle_fs_map(const MConstRef<MFSMap>& m)
{
fsmap.reset(new FSMap(m->get_fsmap()));
- m->put();
signal_cond_list(waiting_for_fsmap);
monclient->sub_got("fsmap", fsmap->get_epoch());
}
-void Client::handle_fs_map_user(MFSMapUser *m)
+void Client::handle_fs_map_user(const MConstRef<MFSMapUser>& m)
{
fsmap_user.reset(new FSMapUser);
*fsmap_user = m->get_fsmap();
- m->put();
monclient->sub_got("fsmap.user", fsmap_user->get_epoch());
signal_cond_list(waiting_for_fsmap);
}
-void Client::handle_mds_map(MMDSMap* m)
+void Client::handle_mds_map(const MConstRef<MMDSMap>& m)
{
+ mds_gid_t old_inc, new_inc;
if (m->get_epoch() <= mdsmap->get_epoch()) {
- ldout(cct, 1) << "handle_mds_map epoch " << m->get_epoch()
+ ldout(cct, 1) << __func__ << " epoch " << m->get_epoch()
<< " is identical to or older than our "
<< mdsmap->get_epoch() << dendl;
- m->put();
return;
- }
+ }
- ldout(cct, 1) << "handle_mds_map epoch " << m->get_epoch() << dendl;
+ ldout(cct, 1) << __func__ << " epoch " << m->get_epoch() << dendl;
std::unique_ptr<MDSMap> oldmap(new MDSMap);
oldmap.swap(mdsmap);
}
// reset session
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end(); ) {
+ for (auto p = mds_sessions.begin(); p != mds_sessions.end(); ) {
mds_rank_t mds = p->first;
- MetaSession *session = p->second;
+ MetaSession *session = &p->second;
++p;
int oldstate = oldmap->get_state(mds);
int newstate = mdsmap->get_state(mds);
if (!mdsmap->is_up(mds)) {
session->con->mark_down();
- } else if (mdsmap->get_inst(mds) != session->inst) {
+ } else if (mdsmap->get_addrs(mds) != session->addrs) {
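+ // a changed incarnation means this rank was restarted and the old
+ // session state is gone; force oldstate to STATE_NULL so we close and
+ // rebuild the session below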
+ old_inc = oldmap->get_incarnation(mds);
+ new_inc = mdsmap->get_incarnation(mds);
+ if (old_inc != new_inc) {
+ ldout(cct, 1) << "mds incarnation changed from "
+ << old_inc << " to " << new_inc << dendl;
+ oldstate = MDSMap::STATE_NULL;
+ }
session->con->mark_down();
- session->inst = mdsmap->get_inst(mds);
+ session->addrs = mdsmap->get_addrs(mds);
// When new MDS starts to take over, notify kernel to trim unused entries
// in its dcache/icache. Hopefully, the kernel will release some unused
// inodes before the new MDS enters reconnect state.
session->mds_state = newstate;
if (newstate == MDSMap::STATE_RECONNECT) {
- session->con = messenger->get_connection(session->inst);
+ session->con = messenger->connect_to_mds(session->addrs);
send_reconnect(session);
- } else if (newstate >= MDSMap::STATE_ACTIVE) {
- if (oldstate < MDSMap::STATE_ACTIVE) {
- // kick new requests
- kick_requests(session);
- kick_flushing_caps(session);
- signal_context_list(session->waiting_for_open);
- kick_maxsize_requests(session);
- wake_inode_waiters(session);
+ } else if (newstate > MDSMap::STATE_RECONNECT) {
+ if (oldstate < MDSMap::STATE_RECONNECT) {
+ ldout(cct, 1) << "we may miss the MDSMap::RECONNECT, close mds session ... " << dendl;
+ _closed_mds_session(session);
+ continue;
+ }
+ if (newstate >= MDSMap::STATE_ACTIVE) {
+ if (oldstate < MDSMap::STATE_ACTIVE) {
+ // kick new requests
+ kick_requests(session);
+ kick_flushing_caps(session);
+ signal_context_list(session->waiting_for_open);
+ wake_up_session_caps(session, true);
+ }
+ connect_mds_targets(mds);
}
- connect_mds_targets(mds);
} else if (newstate == MDSMap::STATE_NULL &&
mds >= mdsmap->get_max_mds()) {
_closed_mds_session(session);
// kick any waiting threads
signal_cond_list(waiting_for_mdsmap);
- m->put();
-
monclient->sub_got("mdsmap", mdsmap->get_epoch());
}
void Client::send_reconnect(MetaSession *session)
{
mds_rank_t mds = session->mds_num;
- ldout(cct, 10) << "send_reconnect to mds." << mds << dendl;
+ ldout(cct, 10) << __func__ << " to mds." << mds << dendl;
// trim unused caps to reduce MDS's cache rejoin time
trim_cache_for_reconnect(session);
session->readonly = false;
- if (session->release) {
- session->release->put();
- session->release = NULL;
- }
+ session->release.reset();
// reset my cap seq number
session->seq = 0;
//make sure unsafe requests get saved
resend_unsafe_requests(session);
- MClientReconnect *m = new MClientReconnect;
+ early_kick_flushing_caps(session);
+
+ auto m = make_message<MClientReconnect>();
+ bool allow_multi = session->mds_features.test(CEPHFS_FEATURE_MULTI_RECONNECT);
// i have an open session.
ceph::unordered_set<inodeno_t> did_snaprealm;
p != inode_map.end();
++p) {
Inode *in = p->second;
- if (in->caps.count(mds)) {
+ auto it = in->caps.find(mds);
+ if (it != in->caps.end()) {
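+ // when the MDS supports multi-message reconnect, flush the current
+ // message once it approaches half the int message-size limit and
+ // continue in a fresh one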
+ if (allow_multi &&
+ m->get_approx_size() >=
+ static_cast<size_t>((std::numeric_limits<int>::max() >> 1))) {
+ m->mark_more();
+ session->con->send_message2(std::move(m));
+
+ m = make_message<MClientReconnect>();
+ }
+
+ Cap &cap = it->second;
ldout(cct, 10) << " caps on " << p->first
- << " " << ccap_string(in->caps[mds]->issued)
+ << " " << ccap_string(cap.issued)
<< " wants " << ccap_string(in->caps_wanted())
<< dendl;
filepath path;
- in->make_long_path(path);
+ in->make_short_path(path);
ldout(cct, 10) << " path " << path << dendl;
bufferlist flockbl;
_encode_filelocks(in, flockbl);
- Cap *cap = in->caps[mds];
- cap->seq = 0; // reset seq.
- cap->issue_seq = 0; // reset seq.
- cap->mseq = 0; // reset seq.
- cap->issued = cap->implemented;
-
+ cap.seq = 0; // reset seq.
+ cap.issue_seq = 0; // reset seq.
+ cap.mseq = 0; // reset seq.
+ // cap gen should catch up with session cap_gen
+ if (cap.gen < session->cap_gen) {
+ cap.gen = session->cap_gen;
+ cap.issued = cap.implemented = CEPH_CAP_PIN;
+ } else {
+ cap.issued = cap.implemented;
+ }
snapid_t snap_follows = 0;
if (!in->cap_snaps.empty())
snap_follows = in->cap_snaps.begin()->first;
m->add_cap(p->first.ino,
- cap->cap_id,
+ cap.cap_id,
path.get_ino(), path.get_path(), // ino
in->caps_wanted(), // wanted
- cap->issued, // issued
+ cap.issued, // issued
in->snaprealm->ino,
snap_follows,
flockbl);
}
}
- early_kick_flushing_caps(session);
+ if (!allow_multi)
+ m->set_encoding_version(0); // use connection features to choose encoding
+ session->con->send_message2(std::move(m));
- session->con->send_message(m);
+ mount_cond.notify_all();
- mount_cond.Signal();
+ if (session->reclaim_state == MetaSession::RECLAIMING)
+ signal_cond_list(waiting_for_reclaim);
}
void Client::kick_requests(MetaSession *session)
{
- ldout(cct, 10) << "kick_requests for mds." << session->mds_num << dendl;
+ ldout(cct, 10) << __func__ << " for mds." << session->mds_num << dendl;
for (map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.begin();
p != mds_requests.end();
++p) {
if (req->aborted()) {
if (req->caller_cond) {
req->kick = true;
- req->caller_cond->Signal();
+ req->caller_cond->notify_all();
}
continue;
}
void Client::wait_unsafe_requests()
{
list<MetaRequest*> last_unsafe_reqs;
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p) {
- MetaSession *s = p->second;
- if (!s->unsafe_requests.empty()) {
- MetaRequest *req = s->unsafe_requests.back();
+ for (const auto &p : mds_sessions) {
+ const MetaSession &s = p.second;
+ if (!s.unsafe_requests.empty()) {
+ MetaRequest *req = s.unsafe_requests.back();
req->get();
last_unsafe_reqs.push_back(req);
}
void Client::kick_requests_closed(MetaSession *session)
{
- ldout(cct, 10) << "kick_requests_closed for mds." << session->mds_num << dendl;
+ ldout(cct, 10) << __func__ << " for mds." << session->mds_num << dendl;
for (map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.begin();
p != mds_requests.end(); ) {
MetaRequest *req = p->second;
if (req->mds == session->mds_num) {
if (req->caller_cond) {
req->kick = true;
- req->caller_cond->Signal();
+ req->caller_cond->notify_all();
}
req->item.remove_myself();
if (req->got_unsafe) {
- lderr(cct) << "kick_requests_closed removing unsafe request " << req->get_tid() << dendl;
+ lderr(cct) << __func__ << " removing unsafe request " << req->get_tid() << dendl;
req->unsafe_item.remove_myself();
- req->unsafe_dir_item.remove_myself();
- req->unsafe_target_item.remove_myself();
+ if (is_dir_operation(req)) {
+ Inode *dir = req->inode();
+ ceph_assert(dir);
+ dir->set_async_err(-EIO);
+ lderr(cct) << "kick_requests_closed drop req of inode(dir) : "
+ << dir->ino << " " << req->get_tid() << dendl;
+ req->unsafe_dir_item.remove_myself();
+ }
+ if (req->target) {
+ InodeRef &in = req->target;
+ in->set_async_err(-EIO);
+ lderr(cct) << "kick_requests_closed drop req of inode : "
+ << in->ino << " " << req->get_tid() << dendl;
+ req->unsafe_target_item.remove_myself();
+ }
signal_cond_list(req->waitfor_safe);
unregister_request(req);
}
}
}
- assert(session->requests.empty());
- assert(session->unsafe_requests.empty());
+ ceph_assert(session->requests.empty());
+ ceph_assert(session->unsafe_requests.empty());
}
s->seq++;
ldout(cct, 10) << " mds." << s->mds_num << " seq now " << s->seq << dendl;
if (s->state == MetaSession::STATE_CLOSING) {
- s->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_CLOSE, s->seq));
+ s->con->send_message2(make_message<MClientSession>(CEPH_SESSION_REQUEST_CLOSE, s->seq));
}
}
-void Client::handle_lease(MClientLease *m)
+void Client::handle_lease(const MConstRef<MClientLease>& m)
{
- ldout(cct, 10) << "handle_lease " << *m << dendl;
+ ldout(cct, 10) << __func__ << " " << *m << dendl;
- assert(m->get_action() == CEPH_MDS_LEASE_REVOKE);
+ ceph_assert(m->get_action() == CEPH_MDS_LEASE_REVOKE);
mds_rank_t mds = mds_rank_t(m->get_source().num());
MetaSession *session = _get_mds_session(mds, m->get_connection().get());
if (!session) {
- m->put();
return;
}
}
in = inode_map[vino];
- if (m->get_mask() & CEPH_LOCK_DN) {
+ if (m->get_mask() & CEPH_LEASE_VALID) {
if (!in->dir || in->dir->dentries.count(m->dname) == 0) {
ldout(cct, 10) << " don't have dir|dentry " << m->get_ino() << "/" << m->dname <<dendl;
goto revoke;
}
revoke:
- m->get_connection()->send_message(
- new MClientLease(
- CEPH_MDS_LEASE_RELEASE, seq,
- m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname));
- m->put();
+ {
+ auto reply = make_message<MClientLease>(CEPH_MDS_LEASE_RELEASE, seq,
+ m->get_mask(), m->get_ino(),
+ m->get_first(), m->get_last(), m->dname);
+ m->get_connection()->send_message2(std::move(reply));
+ }
}
void Client::put_inode(Inode *in, int n)
{
- ldout(cct, 10) << "put_inode on " << *in << dendl;
+ ldout(cct, 10) << __func__ << " on " << *in << dendl;
int left = in->_put(n);
if (left == 0) {
// release any caps
remove_all_caps(in);
- ldout(cct, 10) << "put_inode deleting " << *in << dendl;
+ ldout(cct, 10) << __func__ << " deleting " << *in << dendl;
bool unclean = objectcacher->release_set(&in->oset);
- assert(!unclean);
+ ceph_assert(!unclean);
inode_map.erase(in->vino());
if (use_faked_inos())
_release_faked_ino(in);
void Client::close_dir(Dir *dir)
{
Inode *in = dir->parent_inode;
- ldout(cct, 15) << "close_dir dir " << dir << " on " << in << dendl;
- assert(dir->is_empty());
- assert(in->dir == dir);
- assert(in->dn_set.size() < 2); // dirs can't be hard-linked
- if (!in->dn_set.empty())
+ ldout(cct, 15) << __func__ << " dir " << dir << " on " << in << dendl;
+ ceph_assert(dir->is_empty());
+ ceph_assert(in->dir == dir);
+ ceph_assert(in->dentries.size() < 2); // dirs can't be hard-linked
+ if (!in->dentries.empty())
in->get_first_parent()->put(); // unpin dentry
delete in->dir;
{
if (!dn) {
// create a new Dentry
- dn = new Dentry;
- dn->name = name;
-
- // link to dir
- dn->dir = dir;
- dir->dentries[dn->name] = dn;
+ dn = new Dentry(dir, name);
+
lru.lru_insert_mid(dn); // mid or top?
- if (!in)
- dir->num_null_dentries++;
ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in
<< " dn " << dn << " (new dn)" << dendl;
} else {
- assert(!dn->inode);
- if (in)
- dir->num_null_dentries--;
+ ceph_assert(!dn->inode);
ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in
<< " dn " << dn << " (old dn)" << dendl;
}
if (in) { // link to inode
- dn->inode = in;
- if (in->is_dir()) {
- if (in->dir)
- dn->get(); // dir -> dn pin
- if (in->ll_ref)
- dn->get(); // ll_ref -> dn pin
- }
-
- assert(in->dn_set.count(dn) == 0);
-
+ InodeRef tmp_ref;
// only one parent for directories!
- if (in->is_dir() && !in->dn_set.empty()) {
+ if (in->is_dir() && !in->dentries.empty()) {
+ tmp_ref = in; // prevent unlink below from freeing the inode.
Dentry *olddn = in->get_first_parent();
- assert(olddn->dir != dir || olddn->name != name);
+ ceph_assert(olddn->dir != dir || olddn->name != name);
Inode *old_diri = olddn->dir->parent_inode;
- old_diri->dir_release_count++;
clear_dir_complete_and_ordered(old_diri, true);
unlink(olddn, true, true); // keep dir, dentry
}
- in->dn_set.insert(dn);
-
- ldout(cct, 20) << "link inode " << in << " parents now " << in->dn_set << dendl;
+ dn->link(in);
+ ldout(cct, 20) << "link inode " << in << " parents now " << in->dentries << dendl;
}
return dn;
void Client::unlink(Dentry *dn, bool keepdir, bool keepdentry)
{
- InodeRef in;
- in.swap(dn->inode);
+ InodeRef in(dn->inode);
ldout(cct, 15) << "unlink dir " << dn->dir->parent_inode << " '" << dn->name << "' dn " << dn
<< " inode " << dn->inode << dendl;
// unlink from inode
- if (in) {
- if (in->is_dir()) {
- if (in->dir)
- dn->put(); // dir -> dn pin
- if (in->ll_ref)
- dn->put(); // ll_ref -> dn pin
- }
- dn->inode = 0;
- assert(in->dn_set.count(dn));
- in->dn_set.erase(dn);
- ldout(cct, 20) << "unlink inode " << in << " parents now " << in->dn_set << dendl;
+ if (dn->inode) {
+ dn->unlink();
+ ldout(cct, 20) << "unlink inode " << in << " parents now " << in->dentries << dendl;
}
if (keepdentry) {
dn->lease_mds = -1;
- if (in)
- dn->dir->num_null_dentries++;
} else {
ldout(cct, 15) << "unlink removing '" << dn->name << "' dn " << dn << dendl;
// unlink from dir
- dn->dir->dentries.erase(dn->name);
- if (!in)
- dn->dir->num_null_dentries--;
- if (dn->dir->is_empty() && !keepdir)
- close_dir(dn->dir);
- dn->dir = 0;
+ Dir *dir = dn->dir;
+ dn->detach();
// delete den
lru.lru_remove(dn);
dn->put();
+
+ if (dir->is_empty() && !keepdir)
+ close_dir(dir);
}
}
public:
C_Client_FlushComplete(Client *c, Inode *in) : client(c), inode(in) { }
void finish(int r) override {
- assert(client->client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_locked_by_me(client->client_lock));
if (r != 0) {
client_t const whoami = client->whoami; // For the benefit of ldout prefix
ldout(client->cct, 1) << "I/O error from flush on inode " << inode
{
if ((cap & CEPH_CAP_FILE_BUFFER) &&
in->cap_refs[CEPH_CAP_FILE_BUFFER] == 0) {
- ldout(cct, 5) << "get_cap_ref got first FILE_BUFFER ref on " << *in << dendl;
+ ldout(cct, 5) << __func__ << " got first FILE_BUFFER ref on " << *in << dendl;
in->get();
}
if ((cap & CEPH_CAP_FILE_CACHE) &&
in->cap_refs[CEPH_CAP_FILE_CACHE] == 0) {
- ldout(cct, 5) << "get_cap_ref got first FILE_CACHE ref on " << *in << dendl;
+ ldout(cct, 5) << __func__ << " got first FILE_CACHE ref on " << *in << dendl;
in->get();
}
in->get_cap_ref(cap);
int put_nref = 0;
int drop = last & ~in->caps_issued();
if (in->snapid == CEPH_NOSNAP) {
- if ((last & CEPH_CAP_FILE_WR) &&
+ if ((last & (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER)) &&
!in->cap_snaps.empty() &&
in->cap_snaps.rbegin()->second.writing) {
- ldout(cct, 10) << "put_cap_ref finishing pending cap_snap on " << *in << dendl;
+ ldout(cct, 10) << __func__ << " finishing pending cap_snap on " << *in << dendl;
in->cap_snaps.rbegin()->second.writing = 0;
finish_cap_snap(in, in->cap_snaps.rbegin()->second, get_caps_used(in));
signal_cond_list(in->waitfor_caps); // wake up blocked sync writers
for (auto &p : in->cap_snaps)
p.second.dirty_data = 0;
signal_cond_list(in->waitfor_commit);
- ldout(cct, 5) << "put_cap_ref dropped last FILE_BUFFER ref on " << *in << dendl;
+ ldout(cct, 5) << __func__ << " dropped last FILE_BUFFER ref on " << *in << dendl;
++put_nref;
}
}
if (last & CEPH_CAP_FILE_CACHE) {
- ldout(cct, 5) << "put_cap_ref dropped last FILE_CACHE ref on " << *in << dendl;
+ ldout(cct, 5) << __func__ << " dropped last FILE_CACHE ref on " << *in << dendl;
++put_nref;
}
if (drop)
}
}
-int Client::get_caps(Inode *in, int need, int want, int *phave, loff_t endoff)
+int Client::get_caps(Fh *fh, int need, int want, int *phave, loff_t endoff)
{
+ Inode *in = fh->inode.get();
+
int r = check_pool_perm(in, need);
if (r < 0)
return r;
return -EBADF;
}
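+ // a write-capable fh whose generation predates the current fd_gen is
+ // stale; refuse to write through it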
+ if ((fh->mode & CEPH_FILE_MODE_WR) && fh->gen != fd_gen)
+ return -EBADF;
+
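+ // I_ERROR_FILELOCK marks inodes whose file locks were lost with a dead
+ // session (see remove_session_caps); fail I/O on fhs still holding locks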
+ if ((in->flags & I_ERROR_FILELOCK) && fh->has_any_filelocks())
+ return -EIO;
+
int implemented;
int have = in->caps_issued(&implemented);
bool waitfor_commit = false;
if (have & need & CEPH_CAP_FILE_WR) {
- if (endoff > 0 &&
- (endoff >= (loff_t)in->max_size ||
- endoff > (loff_t)(in->size << 1)) &&
- endoff > (loff_t)in->wanted_max_size) {
- ldout(cct, 10) << "wanted_max_size " << in->wanted_max_size << " -> " << endoff << dendl;
- in->wanted_max_size = endoff;
- check_caps(in, 0);
+ if (endoff > 0) {
+ if ((endoff >= (loff_t)in->max_size ||
+ endoff > (loff_t)(in->size << 1)) &&
+ endoff > (loff_t)in->wanted_max_size) {
+ ldout(cct, 10) << "wanted_max_size " << in->wanted_max_size << " -> " << endoff << dendl;
+ in->wanted_max_size = endoff;
+ }
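+ // only ping the MDS if we need a larger max_size than both the current
+ // and the previously requested value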
+ if (in->wanted_max_size > in->max_size &&
+ in->wanted_max_size > in->requested_max_size)
+ check_caps(in, 0);
}
if (endoff >= 0 && endoff > (loff_t)in->max_size) {
return ret;
continue;
}
- if ((mds_wanted & file_wanted) ==
- (file_wanted & (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR))) {
+ if (!(file_wanted & ~mds_wanted))
in->flags &= ~I_CAP_DROPPED;
- }
}
if (waitfor_caps)
void Client::cap_delay_requeue(Inode *in)
{
- ldout(cct, 10) << "cap_delay_requeue on " << *in << dendl;
+ ldout(cct, 10) << __func__ << " on " << *in << dendl;
in->hold_caps_until = ceph_clock_now();
in->hold_caps_until += cct->_conf->client_caps_release_delay;
delayed_list.push_back(&in->delay_cap_item);
}
void Client::send_cap(Inode *in, MetaSession *session, Cap *cap,
- bool sync, int used, int want, int retain,
+ int flags, int used, int want, int retain,
int flush, ceph_tid_t flush_tid)
{
int held = cap->issued | cap->implemented;
int dropping = cap->issued & ~retain;
int op = CEPH_CAP_OP_UPDATE;
- ldout(cct, 10) << "send_cap " << *in
+ ldout(cct, 10) << __func__ << " " << *in
<< " mds." << session->mds_num << " seq " << cap->seq
- << (sync ? " sync " : " async ")
<< " used " << ccap_string(used)
<< " want " << ccap_string(want)
<< " flush " << ccap_string(flush)
if (flush)
follows = in->snaprealm->get_snap_context().seq;
- MClientCaps *m = new MClientCaps(op,
+ auto m = make_message<MClientCaps>(op,
in->ino,
0,
cap->cap_id, cap->seq,
m->head.nlink = in->nlink;
if (flush & CEPH_CAP_XATTR_EXCL) {
- ::encode(in->xattrs, m->xattrbl);
+ encode(in->xattrs, m->xattrbl);
m->head.xattr_version = in->xattr_version;
}
m->btime = in->btime;
m->time_warp_seq = in->time_warp_seq;
m->change_attr = in->change_attr;
- if (sync)
- m->flags |= CLIENT_CAPS_SYNC;
-
+
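+ // flag the message if the newest cap snap has not been flushed yet
+ // (flush_tid == 0), so the MDS knows a FLUSHSNAP is still coming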
+ if (!(flags & MClientCaps::FLAG_PENDING_CAPSNAP) &&
+ !in->cap_snaps.empty() &&
+ in->cap_snaps.rbegin()->second.flush_tid == 0)
+ flags |= MClientCaps::FLAG_PENDING_CAPSNAP;
+ m->flags = flags;
+
if (flush & CEPH_CAP_FILE_WR) {
m->inline_version = in->inline_version;
m->inline_data = in->inline_data;
m->set_snap_follows(follows);
cap->wanted = want;
if (cap == in->auth_cap) {
- m->set_max_size(in->wanted_max_size);
- in->requested_max_size = in->wanted_max_size;
- ldout(cct, 15) << "auth cap, setting max_size = " << in->requested_max_size << dendl;
+ if (want & CEPH_CAP_ANY_FILE_WR) {
+ m->set_max_size(in->wanted_max_size);
+ in->requested_max_size = in->wanted_max_size;
+ ldout(cct, 15) << "auth cap, requesting max_size " << in->requested_max_size << dendl;
+ } else {
+ in->requested_max_size = 0;
+ ldout(cct, 15) << "auth cap, reset requested_max_size due to not wanting any file write cap" << dendl;
+ }
}
if (!session->flushing_caps_tids.empty())
m->set_oldest_flush_tid(*session->flushing_caps_tids.begin());
- session->con->send_message(m);
+ session->con->send_message2(std::move(m));
}
static bool is_max_size_approaching(Inode *in)
return false;
}
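+// When LAZYIO is implemented on an inode, account Fc/Fb usage that the
+// issued (or implemented) caps do not cover against the LAZYIO cap instead,
+// so lazy I/O does not appear to block revocation of FILE_CACHE/FILE_BUFFER.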
+static int adjust_caps_used_for_lazyio(int used, int issued, int implemented)
+{
+ if (!(used & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER)))
+ return used;
+ if (!(implemented & CEPH_CAP_FILE_LAZYIO))
+ return used;
+
+ if (issued & CEPH_CAP_FILE_LAZYIO) {
+ if (!(issued & CEPH_CAP_FILE_CACHE)) {
+ used &= ~CEPH_CAP_FILE_CACHE;
+ used |= CEPH_CAP_FILE_LAZYIO;
+ }
+ if (!(issued & CEPH_CAP_FILE_BUFFER)) {
+ used &= ~CEPH_CAP_FILE_BUFFER;
+ used |= CEPH_CAP_FILE_LAZYIO;
+ }
+ } else {
+ if (!(implemented & CEPH_CAP_FILE_CACHE)) {
+ used &= ~CEPH_CAP_FILE_CACHE;
+ used |= CEPH_CAP_FILE_LAZYIO;
+ }
+ if (!(implemented & CEPH_CAP_FILE_BUFFER)) {
+ used &= ~CEPH_CAP_FILE_BUFFER;
+ used |= CEPH_CAP_FILE_LAZYIO;
+ }
+ }
+ return used;
+}
+
/**
* check_caps
*
unsigned used = get_caps_used(in);
unsigned cap_used;
- if (in->is_dir() && (in->flags & I_COMPLETE)) {
- // we do this here because we don't want to drop to Fs (and then
- // drop the Fs if we do a create!) if that alone makes us send lookups
- // to the MDS. Doing it in in->caps_wanted() has knock-on effects elsewhere
- wanted |= CEPH_CAP_FILE_EXCL;
- }
-
int implemented;
int issued = in->caps_issued(&implemented);
int revoking = implemented & ~issued;
+ int orig_used = used;
+ used = adjust_caps_used_for_lazyio(used, issued, implemented);
+
int retain = wanted | used | CEPH_CAP_PIN;
- if (!unmounting) {
- if (wanted)
+ if (!unmounting && in->nlink > 0) {
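+ // an unlinked inode (nlink == 0) is not worth retaining extra caps for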
+ if (wanted) {
retain |= CEPH_CAP_ANY;
- else
+ } else if (in->is_dir() &&
+ (issued & CEPH_CAP_FILE_SHARED) &&
+ (in->flags & I_COMPLETE)) {
+ // we do this here because we don't want to drop to Fs (and then
+ // drop the Fs if we do a create!) if that alone makes us send lookups
+ // to the MDS. Doing it in in->caps_wanted() has knock-on effects elsewhere
+ wanted = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
+ retain |= wanted;
+ } else {
retain |= CEPH_CAP_ANY_SHARED;
+ // keep RD only if we didn't have the file open RW,
+ // because then the mds would revoke it anyway to
+ // journal max_size=0.
+ if (in->max_size == 0)
+ retain |= CEPH_CAP_ANY_RD;
+ }
}
- ldout(cct, 10) << "check_caps on " << *in
+ ldout(cct, 10) << __func__ << " on " << *in
<< " wanted " << ccap_string(wanted)
<< " used " << ccap_string(used)
<< " issued " << ccap_string(issued)
if (in->caps.empty())
return; // guard if at end of func
- if ((revoking & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) &&
- (used & CEPH_CAP_FILE_CACHE) && !(used & CEPH_CAP_FILE_BUFFER)) {
+ if (!(orig_used & CEPH_CAP_FILE_BUFFER) &&
+ (revoking & used & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO))) {
if (_release(in))
- used &= ~CEPH_CAP_FILE_CACHE;
+ used &= ~(CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO);
}
- if (!in->cap_snaps.empty())
- flush_snaps(in);
-
- if (flags & CHECK_CAPS_NODELAY)
- in->hold_caps_until = utime_t();
- else
- cap_delay_requeue(in);
-
- utime_t now = ceph_clock_now();
- map<mds_rank_t, Cap*>::iterator it = in->caps.begin();
- while (it != in->caps.end()) {
- mds_rank_t mds = it->first;
- Cap *cap = it->second;
- ++it;
+ for (auto &p : in->caps) {
+ mds_rank_t mds = p.first;
+ Cap &cap = p.second;
- MetaSession *session = mds_sessions[mds];
- assert(session);
+ MetaSession *session = &mds_sessions.at(mds);
cap_used = used;
- if (in->auth_cap && cap != in->auth_cap)
+ if (in->auth_cap && &cap != in->auth_cap)
cap_used &= ~in->auth_cap->issued;
- revoking = cap->implemented & ~cap->issued;
+ revoking = cap.implemented & ~cap.issued;
ldout(cct, 10) << " cap mds." << mds
- << " issued " << ccap_string(cap->issued)
- << " implemented " << ccap_string(cap->implemented)
+ << " issued " << ccap_string(cap.issued)
+ << " implemented " << ccap_string(cap.implemented)
<< " revoking " << ccap_string(revoking) << dendl;
if (in->wanted_max_size > in->max_size &&
in->wanted_max_size > in->requested_max_size &&
- cap == in->auth_cap)
+ &cap == in->auth_cap)
goto ack;
/* approaching file_max? */
- if ((cap->issued & CEPH_CAP_FILE_WR) &&
- cap == in->auth_cap &&
+ if ((cap.issued & CEPH_CAP_FILE_WR) &&
+ &cap == in->auth_cap &&
is_max_size_approaching(in)) {
ldout(cct, 10) << "size " << in->size << " approaching max_size " << in->max_size
<< ", reported " << in->reported_size << dendl;
/* completed revocation? */
if (revoking && (revoking & cap_used) == 0) {
- ldout(cct, 10) << "completed revocation of " << ccap_string(cap->implemented & ~cap->issued) << dendl;
+ ldout(cct, 10) << "completed revocation of " << ccap_string(cap.implemented & ~cap.issued) << dendl;
goto ack;
}
/* want more caps from mds? */
- if (wanted & ~(cap->wanted | cap->issued))
+ if (wanted & ~(cap.wanted | cap.issued))
goto ack;
if (!revoking && unmounting && (cap_used == 0))
goto ack;
- if (wanted == cap->wanted && // mds knows what we want.
- ((cap->issued & ~retain) == 0) &&// and we don't have anything we wouldn't like
- !in->dirty_caps) // and we have no dirty caps
+ if ((cap.issued & ~retain) == 0 && // we don't have anything we wouldn't like
+ !in->dirty_caps) // and we have no dirty caps
continue;
- if (now < in->hold_caps_until) {
+ if (!(flags & CHECK_CAPS_NODELAY)) {
ldout(cct, 10) << "delaying cap release" << dendl;
+ cap_delay_requeue(in);
continue;
}
ack:
- // re-send old cap/snapcap flushes first.
- if (session->mds_state >= MDSMap::STATE_RECONNECT &&
- session->mds_state < MDSMap::STATE_ACTIVE &&
- session->early_flushing_caps.count(in) == 0) {
- ldout(cct, 20) << " reflushing caps (check_caps) on " << *in
- << " to mds." << session->mds_num << dendl;
- session->early_flushing_caps.insert(in);
- if (in->cap_snaps.size())
- flush_snaps(in, true);
- if (in->flushing_caps)
- flush_caps(in, session, flags & CHECK_CAPS_SYNCHRONOUS);
+ if (&cap == in->auth_cap) {
+ if (in->flags & I_KICK_FLUSH) {
+ ldout(cct, 20) << " reflushing caps (check_caps) on " << *in
+ << " to mds." << mds << dendl;
+ kick_flushing_caps(in, session);
+ }
+ if (!in->cap_snaps.empty() &&
+ in->cap_snaps.rbegin()->second.flush_tid == 0)
+ flush_snaps(in);
}
int flushing;
+ int msg_flags = 0;
ceph_tid_t flush_tid;
- if (in->auth_cap == cap && in->dirty_caps) {
+ if (in->auth_cap == &cap && in->dirty_caps) {
flushing = mark_caps_flushing(in, &flush_tid);
+ if (flags & CHECK_CAPS_SYNCHRONOUS)
+ msg_flags |= MClientCaps::FLAG_SYNC;
} else {
flushing = 0;
flush_tid = 0;
}
- send_cap(in, session, cap, flags & CHECK_CAPS_SYNCHRONOUS, cap_used, wanted,
- retain, flushing, flush_tid);
+ send_cap(in, session, &cap, msg_flags, cap_used, wanted, retain,
+ flushing, flush_tid);
}
}
{
int used = get_caps_used(in);
int dirty = in->caps_dirty();
- ldout(cct, 10) << "queue_cap_snap " << *in << " snapc " << old_snapc << " used " << ccap_string(used) << dendl;
+ ldout(cct, 10) << __func__ << " " << *in << " snapc " << old_snapc << " used " << ccap_string(used) << dendl;
if (in->cap_snaps.size() &&
in->cap_snaps.rbegin()->second.writing) {
- ldout(cct, 10) << "queue_cap_snap already have pending cap_snap on " << *in << dendl;
+ ldout(cct, 10) << __func__ << " already have pending cap_snap on " << *in << dendl;
return;
} else if (in->caps_dirty() ||
(used & CEPH_CAP_FILE_WR) ||
(dirty & CEPH_CAP_ANY_WR)) {
const auto &capsnapem = in->cap_snaps.emplace(std::piecewise_construct, std::make_tuple(old_snapc.seq), std::make_tuple(in));
- assert(capsnapem.second == true); /* element inserted */
+ ceph_assert(capsnapem.second); /* element inserted */
CapSnap &capsnap = capsnapem.first->second;
capsnap.context = old_snapc;
capsnap.issued = in->caps_issued();
capsnap.dirty = in->caps_dirty();
-
+
capsnap.dirty_data = (used & CEPH_CAP_FILE_BUFFER);
-
+
capsnap.uid = in->uid;
capsnap.gid = in->gid;
capsnap.mode = in->mode;
capsnap.btime = in->btime;
capsnap.xattrs = in->xattrs;
capsnap.xattr_version = in->xattr_version;
-
+ capsnap.cap_dirtier_uid = in->cap_dirtier_uid;
+ capsnap.cap_dirtier_gid = in->cap_dirtier_gid;
+
if (used & CEPH_CAP_FILE_WR) {
- ldout(cct, 10) << "queue_cap_snap WR used on " << *in << dendl;
+ ldout(cct, 10) << __func__ << " WR used on " << *in << dendl;
capsnap.writing = 1;
} else {
finish_cap_snap(in, capsnap, used);
}
} else {
- ldout(cct, 10) << "queue_cap_snap not dirty|writing on " << *in << dendl;
+ ldout(cct, 10) << __func__ << " not dirty|writing on " << *in << dendl;
}
}
void Client::finish_cap_snap(Inode *in, CapSnap &capsnap, int used)
{
- ldout(cct, 10) << "finish_cap_snap " << *in << " capsnap " << (void *)&capsnap << " used " << ccap_string(used) << dendl;
+ ldout(cct, 10) << __func__ << " " << *in << " capsnap " << (void *)&capsnap << " used " << ccap_string(used) << dendl;
capsnap.size = in->size;
capsnap.mtime = in->mtime;
capsnap.atime = in->atime;
capsnap.ctime = in->ctime;
capsnap.time_warp_seq = in->time_warp_seq;
capsnap.change_attr = in->change_attr;
-
capsnap.dirty |= in->caps_dirty();
- if (capsnap.dirty & CEPH_CAP_FILE_WR) {
+ /* Only reset it if it wasn't set before */
+ if (capsnap.cap_dirtier_uid == -1) {
+ capsnap.cap_dirtier_uid = in->cap_dirtier_uid;
+ capsnap.cap_dirtier_gid = in->cap_dirtier_gid;
+ }
+
+ if (capsnap.dirty & CEPH_CAP_FILE_WR) {
capsnap.inline_data = in->inline_data;
capsnap.inline_version = in->inline_version;
}
if (used & CEPH_CAP_FILE_BUFFER) {
- ldout(cct, 10) << "finish_cap_snap " << *in << " cap_snap " << &capsnap << " used " << used
+ capsnap.writing = 1;
+ ldout(cct, 10) << __func__ << " " << *in << " cap_snap " << &capsnap << " used " << used
<< " WRBUFFER, delaying" << dendl;
} else {
capsnap.dirty_data = 0;
}
}
-void Client::_flushed_cap_snap(Inode *in, snapid_t seq)
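+// build a CEPH_CAP_OP_FLUSHSNAP message for a single CapSnap and send it to
+// the auth MDS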
+void Client::send_flush_snap(Inode *in, MetaSession *session,
+ snapid_t follows, CapSnap& capsnap)
{
- ldout(cct, 10) << "_flushed_cap_snap seq " << seq << " on " << *in << dendl;
- in->cap_snaps.at(seq).dirty_data = 0;
- flush_snaps(in);
+ auto m = make_message<MClientCaps>(CEPH_CAP_OP_FLUSHSNAP,
+ in->ino, in->snaprealm->ino, 0,
+ in->auth_cap->mseq, cap_epoch_barrier);
+ m->caller_uid = capsnap.cap_dirtier_uid;
+ m->caller_gid = capsnap.cap_dirtier_gid;
+
+ m->set_client_tid(capsnap.flush_tid);
+ m->head.snap_follows = follows;
+
+ m->head.caps = capsnap.issued;
+ m->head.dirty = capsnap.dirty;
+
+ m->head.uid = capsnap.uid;
+ m->head.gid = capsnap.gid;
+ m->head.mode = capsnap.mode;
+ m->btime = capsnap.btime;
+
+ m->size = capsnap.size;
+
+ m->head.xattr_version = capsnap.xattr_version;
+ encode(capsnap.xattrs, m->xattrbl);
+
+ m->ctime = capsnap.ctime;
+ m->btime = capsnap.btime;
+ m->mtime = capsnap.mtime;
+ m->atime = capsnap.atime;
+ m->time_warp_seq = capsnap.time_warp_seq;
+ m->change_attr = capsnap.change_attr;
+
+ if (capsnap.dirty & CEPH_CAP_FILE_WR) {
+ m->inline_version = in->inline_version;
+ m->inline_data = in->inline_data;
+ }
+
+ ceph_assert(!session->flushing_caps_tids.empty());
+ m->set_oldest_flush_tid(*session->flushing_caps_tids.begin());
+
+ session->con->send_message2(std::move(m));
}
-void Client::flush_snaps(Inode *in, bool all_again)
+void Client::flush_snaps(Inode *in)
{
- ldout(cct, 10) << "flush_snaps on " << *in << " all_again " << all_again << dendl;
- assert(in->cap_snaps.size());
+ ldout(cct, 10) << "flush_snaps on " << *in << dendl;
+ ceph_assert(in->cap_snaps.size());
// pick auth mds
- assert(in->auth_cap);
+ ceph_assert(in->auth_cap);
MetaSession *session = in->auth_cap->session;
- int mseq = in->auth_cap->mseq;
for (auto &p : in->cap_snaps) {
CapSnap &capsnap = p.second;
- if (!all_again) {
- // only flush once per session
- if (capsnap.flush_tid > 0)
- continue;
- }
+ // only do new flush
+ if (capsnap.flush_tid > 0)
+ continue;
ldout(cct, 10) << "flush_snaps mds." << session->mds_num
<< " follows " << p.first
<< " writing=" << capsnap.writing
<< " on " << *in << dendl;
if (capsnap.dirty_data || capsnap.writing)
- continue;
-
- if (capsnap.flush_tid == 0) {
- capsnap.flush_tid = ++last_flush_tid;
- if (!in->flushing_cap_item.is_on_list())
- session->flushing_caps.push_back(&in->flushing_cap_item);
- session->flushing_caps_tids.insert(capsnap.flush_tid);
- }
-
- MClientCaps *m = new MClientCaps(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, mseq,
- cap_epoch_barrier);
- if (user_id >= 0)
- m->caller_uid = user_id;
- if (group_id >= 0)
- m->caller_gid = group_id;
-
- m->set_client_tid(capsnap.flush_tid);
- m->head.snap_follows = p.first;
-
- m->head.caps = capsnap.issued;
- m->head.dirty = capsnap.dirty;
-
- m->head.uid = capsnap.uid;
- m->head.gid = capsnap.gid;
- m->head.mode = capsnap.mode;
- m->btime = capsnap.btime;
-
- m->size = capsnap.size;
-
- m->head.xattr_version = capsnap.xattr_version;
- ::encode(capsnap.xattrs, m->xattrbl);
-
- m->ctime = capsnap.ctime;
- m->btime = capsnap.btime;
- m->mtime = capsnap.mtime;
- m->atime = capsnap.atime;
- m->time_warp_seq = capsnap.time_warp_seq;
- m->change_attr = capsnap.change_attr;
-
- if (capsnap.dirty & CEPH_CAP_FILE_WR) {
- m->inline_version = in->inline_version;
- m->inline_data = in->inline_data;
- }
+ break;
- assert(!session->flushing_caps_tids.empty());
- m->set_oldest_flush_tid(*session->flushing_caps_tids.begin());
+ capsnap.flush_tid = ++last_flush_tid;
+ session->flushing_caps_tids.insert(capsnap.flush_tid);
+ in->flushing_cap_tids[capsnap.flush_tid] = 0;
+ if (!in->flushing_cap_item.is_on_list())
+ session->flushing_caps.push_back(&in->flushing_cap_item);
- session->con->send_message(m);
+ send_flush_snap(in, session, p.first, capsnap);
}
}
-
-
-void Client::wait_on_list(list<Cond*>& ls)
+void Client::wait_on_list(list<ceph::condition_variable*>& ls)
{
- Cond cond;
+ ceph::condition_variable cond;
ls.push_back(&cond);
- cond.Wait(client_lock);
+ std::unique_lock l{client_lock, std::adopt_lock};
+ cond.wait(l);
+ l.release();
ls.remove(&cond);
}
-void Client::signal_cond_list(list<Cond*>& ls)
+void Client::signal_cond_list(list<ceph::condition_variable*>& ls)
{
- for (list<Cond*>::iterator it = ls.begin(); it != ls.end(); ++it)
- (*it)->Signal();
+ for (auto cond : ls) {
+ cond->notify_all();
+ }
}
void Client::wait_on_context_list(list<Context*>& ls)
{
- Cond cond;
+ ceph::condition_variable cond;
bool done = false;
int r;
- ls.push_back(new C_Cond(&cond, &done, &r));
- while (!done)
- cond.Wait(client_lock);
+ ls.push_back(new C_Cond(cond, &done, &r));
+ std::unique_lock l{client_lock, std::adopt_lock};
+ cond.wait(l, [&done] { return done;});
+ l.release();
}
void Client::signal_context_list(list<Context*>& ls)
}
}
-void Client::wake_inode_waiters(MetaSession *s)
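+// wake anything waiting for caps on this session's inodes; on reconnect also
+// reset max_size state, otherwise downgrade caps the recovering MDS did not
+// re-issue (cap->gen still behind session->cap_gen) to CEPH_CAP_PIN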
+void Client::wake_up_session_caps(MetaSession *s, bool reconnect)
{
- xlist<Cap*>::iterator iter = s->caps.begin();
- while (!iter.end()){
- signal_cond_list((*iter)->inode->waitfor_caps);
- ++iter;
+ for (const auto &cap : s->caps) {
+ auto &in = cap->inode;
+ if (reconnect) {
+ in.requested_max_size = 0;
+ in.wanted_max_size = 0;
+ } else {
+ if (cap->gen < s->cap_gen) {
+ // mds did not re-issue stale cap.
+ cap->issued = cap->implemented = CEPH_CAP_PIN;
+ // make sure mds knows what we want.
+ if (in.caps_file_wanted() & ~cap->wanted)
+ in.flags |= I_CAP_DROPPED;
+ }
+ }
+ signal_cond_list(in.waitfor_caps);
}
}
}
void finish(int r) override {
// _async_invalidate takes the lock when it needs to, call this back from outside of lock.
- assert(!client->client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_not_locked_by_me(client->client_lock));
client->_async_invalidate(ino, offset, length);
}
};
{
if (unmounting)
return;
- ldout(cct, 10) << "_async_invalidate " << ino << " " << off << "~" << len << dendl;
+ ldout(cct, 10) << __func__ << " " << ino << " " << off << "~" << len << dendl;
ino_invalidate_cb(callback_handle, ino, off, len);
}
void Client::_invalidate_inode_cache(Inode *in)
{
- ldout(cct, 10) << "_invalidate_inode_cache " << *in << dendl;
+ ldout(cct, 10) << __func__ << " " << *in << dendl;
// invalidate our userspace inode cache
if (cct->_conf->client_oc) {
void Client::_invalidate_inode_cache(Inode *in, int64_t off, int64_t len)
{
- ldout(cct, 10) << "_invalidate_inode_cache " << *in << " " << off << "~" << len << dendl;
+ ldout(cct, 10) << __func__ << " " << *in << " " << off << "~" << len << dendl;
// invalidate our userspace inode cache
if (cct->_conf->client_oc) {
void Client::_flush_range(Inode *in, int64_t offset, uint64_t size)
{
- assert(client_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(client_lock));
if (!in->oset.dirty_or_tx) {
ldout(cct, 10) << " nothing to flush" << dendl;
return;
}
- Mutex flock("Client::_flush_range flock");
- Cond cond;
- bool safe = false;
- Context *onflush = new C_SafeCond(&flock, &cond, &safe);
+ C_SaferCond onflush("Client::_flush_range flock");
bool ret = objectcacher->file_flush(&in->oset, &in->layout, in->snaprealm->get_snap_context(),
- offset, size, onflush);
+ offset, size, &onflush);
if (!ret) {
// wait for flush
- client_lock.Unlock();
- flock.Lock();
- while (!safe)
- cond.Wait(flock);
- flock.Unlock();
- client_lock.Lock();
+ client_lock.unlock();
+ onflush.wait();
+ client_lock.lock();
}
}
void Client::flush_set_callback(ObjectCacher::ObjectSet *oset)
{
- // Mutex::Locker l(client_lock);
- assert(client_lock.is_locked()); // will be called via dispatch() -> objecter -> ...
+ // std::lock_guard l(client_lock);
+ ceph_assert(ceph_mutex_is_locked(client_lock)); // will be called via dispatch() -> objecter -> ...
Inode *in = static_cast<Inode *>(oset->parent);
- assert(in);
+ ceph_assert(in);
_flushed(in);
}
// checks common to add_update_cap, handle_cap_grant
-void Client::check_cap_issue(Inode *in, Cap *cap, unsigned issued)
+void Client::check_cap_issue(Inode *in, unsigned issued)
{
unsigned had = in->caps_issued();
!(had & CEPH_CAP_FILE_CACHE))
in->cache_gen++;
- if ((issued & CEPH_CAP_FILE_SHARED) &&
- !(had & CEPH_CAP_FILE_SHARED)) {
- in->shared_gen++;
-
+ if ((issued & CEPH_CAP_FILE_SHARED) !=
+ (had & CEPH_CAP_FILE_SHARED)) {
+ if (issued & CEPH_CAP_FILE_SHARED)
+ in->shared_gen++;
if (in->is_dir())
clear_dir_complete_and_ordered(in, true);
}
}
void Client::add_update_cap(Inode *in, MetaSession *mds_session, uint64_t cap_id,
- unsigned issued, unsigned seq, unsigned mseq, inodeno_t realm,
- int flags, const UserPerm& cap_perms)
+ unsigned issued, unsigned wanted, unsigned seq, unsigned mseq,
+ inodeno_t realm, int flags, const UserPerm& cap_perms)
{
- Cap *cap = 0;
+ if (!in->is_any_caps()) {
+ ceph_assert(in->snaprealm == 0);
+ in->snaprealm = get_snap_realm(realm);
+ in->snaprealm->inodes_with_caps.push_back(&in->snaprealm_item);
+ ldout(cct, 15) << __func__ << " first one, opened snaprealm " << in->snaprealm << dendl;
+ } else {
+ ceph_assert(in->snaprealm);
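+ // an auth cap may report a different snap realm; move the inode from
+ // its old realm to the new one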
+ if ((flags & CEPH_CAP_FLAG_AUTH) &&
+ realm != inodeno_t(-1) && in->snaprealm->ino != realm) {
+ in->snaprealm_item.remove_myself();
+ auto oldrealm = in->snaprealm;
+ in->snaprealm = get_snap_realm(realm);
+ in->snaprealm->inodes_with_caps.push_back(&in->snaprealm_item);
+ put_snap_realm(oldrealm);
+ }
+ }
+
mds_rank_t mds = mds_session->mds_num;
- if (in->caps.count(mds)) {
- cap = in->caps[mds];
+ const auto &capem = in->caps.emplace(std::piecewise_construct, std::forward_as_tuple(mds), std::forward_as_tuple(*in, mds_session));
+ Cap &cap = capem.first->second;
+ if (!capem.second) {
+ if (cap.gen < mds_session->cap_gen)
+ cap.issued = cap.implemented = CEPH_CAP_PIN;
/*
* auth mds of the inode changed. we received the cap export
* a message that was send before the cap import message. So
* don't remove caps.
*/
- if (ceph_seq_cmp(seq, cap->seq) <= 0) {
- assert(cap == in->auth_cap);
- assert(cap->cap_id == cap_id);
- seq = cap->seq;
- mseq = cap->mseq;
- issued |= cap->issued;
+ if (ceph_seq_cmp(seq, cap.seq) <= 0) {
+ if (&cap != in->auth_cap)
+ ldout(cct, 0) << "WARNING: " << "inode " << *in << " caps on mds." << mds << " != auth_cap." << dendl;
+
+ ceph_assert(cap.cap_id == cap_id);
+ seq = cap.seq;
+ mseq = cap.mseq;
+ issued |= cap.issued;
flags |= CEPH_CAP_FLAG_AUTH;
}
- } else {
- mds_session->num_caps++;
- if (!in->is_any_caps()) {
- assert(in->snaprealm == 0);
- in->snaprealm = get_snap_realm(realm);
- in->snaprealm->inodes_with_caps.push_back(&in->snaprealm_item);
- ldout(cct, 15) << "add_update_cap first one, opened snaprealm " << in->snaprealm << dendl;
- }
- in->caps[mds] = cap = new Cap;
-
- mds_session->caps.push_back(&cap->cap_item);
- cap->session = mds_session;
- cap->inode = in;
- cap->gen = mds_session->cap_gen;
}
- check_cap_issue(in, cap, issued);
+ check_cap_issue(in, issued);
if (flags & CEPH_CAP_FLAG_AUTH) {
- if (in->auth_cap != cap &&
+ if (in->auth_cap != &cap &&
(!in->auth_cap || ceph_seq_cmp(in->auth_cap->mseq, mseq) < 0)) {
if (in->auth_cap && in->flushing_cap_item.is_on_list()) {
- ldout(cct, 10) << "add_update_cap changing auth cap: "
+ ldout(cct, 10) << __func__ << " changing auth cap: "
<< "add myself to new auth MDS' flushing caps list" << dendl;
adjust_session_flushing_caps(in, in->auth_cap->session, mds_session);
}
- in->auth_cap = cap;
+ in->auth_cap = &cap;
}
}
- unsigned old_caps = cap->issued;
- cap->cap_id = cap_id;
- cap->issued = issued;
- cap->implemented |= issued;
- cap->seq = seq;
- cap->issue_seq = seq;
- cap->mseq = mseq;
- cap->gen = mds_session->cap_gen;
- cap->latest_perms = cap_perms;
- ldout(cct, 10) << "add_update_cap issued " << ccap_string(old_caps) << " -> " << ccap_string(cap->issued)
+ unsigned old_caps = cap.issued;
+ cap.cap_id = cap_id;
+ cap.issued = issued;
+ cap.implemented |= issued;
+ if (ceph_seq_cmp(mseq, cap.mseq) > 0)
+ cap.wanted = wanted;
+ else
+ cap.wanted |= wanted;
+ cap.seq = seq;
+ cap.issue_seq = seq;
+ cap.mseq = mseq;
+ cap.gen = mds_session->cap_gen;
+ cap.latest_perms = cap_perms;
+ ldout(cct, 10) << __func__ << " issued " << ccap_string(old_caps) << " -> " << ccap_string(cap.issued)
<< " from mds." << mds
<< " on " << *in
<< dendl;
- if ((issued & ~old_caps) && in->auth_cap == cap) {
+ if ((issued & ~old_caps) && in->auth_cap == &cap) {
// non-auth MDS is revoking the newly grant caps ?
- for (map<mds_rank_t,Cap*>::iterator it = in->caps.begin(); it != in->caps.end(); ++it) {
- if (it->second == cap)
+ for (auto &p : in->caps) {
+ if (&p.second == &cap)
continue;
- if (it->second->implemented & ~it->second->issued & issued) {
+ if (p.second.implemented & ~p.second.issued & issued) {
check_caps(in, CHECK_CAPS_NODELAY);
break;
}
void Client::remove_cap(Cap *cap, bool queue_release)
{
- Inode *in = cap->inode;
+ auto &in = cap->inode;
MetaSession *session = cap->session;
mds_rank_t mds = cap->session->mds_num;
- ldout(cct, 10) << "remove_cap mds." << mds << " on " << *in << dendl;
+ ldout(cct, 10) << __func__ << " mds." << mds << " on " << in << dendl;
if (queue_release) {
session->enqueue_cap_release(
- in->ino,
+ in.ino,
cap->cap_id,
cap->issue_seq,
cap->mseq,
cap_epoch_barrier);
}
- if (in->auth_cap == cap) {
- if (in->flushing_cap_item.is_on_list()) {
+ if (in.auth_cap == cap) {
+ if (in.flushing_cap_item.is_on_list()) {
ldout(cct, 10) << " removing myself from flushing_cap list" << dendl;
- in->flushing_cap_item.remove_myself();
+ in.flushing_cap_item.remove_myself();
}
- in->auth_cap = NULL;
+ in.auth_cap = NULL;
}
- assert(in->caps.count(mds));
- in->caps.erase(mds);
-
- cap->cap_item.remove_myself();
- delete cap;
+ size_t n = in.caps.erase(mds);
+ ceph_assert(n == 1);
cap = nullptr;
- if (!in->is_any_caps()) {
- ldout(cct, 15) << "remove_cap last one, closing snaprealm " << in->snaprealm << dendl;
- in->snaprealm_item.remove_myself();
- put_snap_realm(in->snaprealm);
- in->snaprealm = 0;
+ if (!in.is_any_caps()) {
+ ldout(cct, 15) << __func__ << " last one, closing snaprealm " << in.snaprealm << dendl;
+ in.snaprealm_item.remove_myself();
+ put_snap_realm(in.snaprealm);
+ in.snaprealm = 0;
}
}
void Client::remove_all_caps(Inode *in)
{
while (!in->caps.empty())
- remove_cap(in->caps.begin()->second, true);
+ remove_cap(&in->caps.begin()->second, true);
}
-void Client::remove_session_caps(MetaSession *s)
+void Client::remove_session_caps(MetaSession *s, int err)
{
- ldout(cct, 10) << "remove_session_caps mds." << s->mds_num << dendl;
+ ldout(cct, 10) << __func__ << " mds." << s->mds_num << dendl;
while (s->caps.size()) {
Cap *cap = *s->caps.begin();
- Inode *in = cap->inode;
- bool dirty_caps = false, cap_snaps = false;
+ InodeRef in(&cap->inode);
+ bool dirty_caps = false;
if (in->auth_cap == cap) {
- cap_snaps = !in->cap_snaps.empty();
dirty_caps = in->dirty_caps | in->flushing_caps;
in->wanted_max_size = 0;
in->requested_max_size = 0;
- in->flags |= I_CAP_DROPPED;
+ if (in->has_any_filelocks())
+ in->flags |= I_ERROR_FILELOCK;
}
+ auto caps = cap->implemented;
+ if (cap->wanted | cap->issued)
+ in->flags |= I_CAP_DROPPED;
remove_cap(cap, false);
- signal_cond_list(in->waitfor_caps);
- if (cap_snaps) {
- InodeRef tmp_ref(in);
- in->cap_snaps.clear();
- }
+ in->cap_snaps.clear();
if (dirty_caps) {
- lderr(cct) << "remove_session_caps still has dirty|flushing caps on " << *in << dendl;
+ lderr(cct) << __func__ << " still has dirty|flushing caps on " << *in << dendl;
if (in->flushing_caps) {
num_flushing_caps--;
in->flushing_cap_tids.clear();
}
in->flushing_caps = 0;
in->mark_caps_clean();
- put_inode(in);
+ put_inode(in.get());
+ }
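+ // if no other cap still covers Fc/Fb, the cached data is no longer
+ // valid: purge it when blacklisted (recording the error if dirty),
+ // otherwise release the clean pages, then ask the kernel to invalidate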
+ caps &= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER;
+ if (caps && !in->caps_issued_mask(caps, true)) {
+ if (err == -EBLACKLISTED) {
+ if (in->oset.dirty_or_tx) {
+ lderr(cct) << __func__ << " still has dirty data on " << *in << dendl;
+ in->set_async_err(err);
+ }
+ objectcacher->purge_set(&in->oset);
+ } else {
+ objectcacher->release_set(&in->oset);
+ }
+ _schedule_invalidate_callback(in.get(), 0, 0);
}
+
+ signal_cond_list(in->waitfor_caps);
}
s->flushing_caps_tids.clear();
- sync_cond.Signal();
+ sync_cond.notify_all();
}
int Client::_do_remount(bool retry_on_error)
{
- uint64_t max_retries = cct->_conf->get_val<uint64_t>("mds_max_retries_on_remount_failure");
+ uint64_t max_retries = cct->_conf.get_val<uint64_t>("mds_max_retries_on_remount_failure");
errno = 0;
int r = remount_cb(callback_handle);
"return code = " << r << dendl;
}
bool should_abort =
- (cct->_conf->get_val<bool>("client_die_on_failed_remount") ||
- cct->_conf->get_val<bool>("client_die_on_failed_dentry_invalidate")) &&
+ (cct->_conf.get_val<bool>("client_die_on_failed_remount") ||
+ cct->_conf.get_val<bool>("client_die_on_failed_dentry_invalidate")) &&
!(retry_on_error && (++retries_on_invalidate < max_retries));
if (should_abort && !unmounting) {
lderr(cct) << "failed to remount for kernel dentry trimming; quitting!" << dendl;
public:
explicit C_Client_Remount(Client *c) : client(c) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
client->_do_remount(true);
}
};
for (auto p = dir->dentries.begin(); p != dir->dentries.end(); ) {
Dentry *dn = p->second;
++p;
- assert(!dn->inode);
+ ceph_assert(!dn->inode);
if (dn->lru_is_expireable())
unlink(dn, true, false); // keep dir, drop dentry
}
}
}
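+// delivers the ino_release callback from the async_ino_releasor finisher
+// thread, never while client_lock is held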
+class C_Client_CacheRelease : public Context {
+private:
+ Client *client;
+ vinodeno_t ino;
+public:
+ C_Client_CacheRelease(Client *c, Inode *in) :
+ client(c) {
+ if (client->use_faked_inos())
+ ino = vinodeno_t(in->faked_ino, CEPH_NOSNAP);
+ else
+ ino = in->vino();
+ }
+ void finish(int r) override {
+ ceph_assert(ceph_mutex_is_not_locked_by_me(client->client_lock));
+ client->_async_inode_release(ino);
+ }
+};
+
+void Client::_async_inode_release(vinodeno_t ino)
+{
+ if (unmounting)
+ return;
+ ldout(cct, 10) << __func__ << " " << ino << dendl;
+ ino_release_cb(callback_handle, ino);
+}
+
+void Client::_schedule_ino_release_callback(Inode *in) {
+
+ if (ino_release_cb)
+ // queue the release; C_Client_CacheRelease invokes the callback later,
+ // outside client_lock
+ async_ino_releasor.queue(new C_Client_CacheRelease(this, in));
+}
+
void Client::trim_caps(MetaSession *s, uint64_t max)
{
mds_rank_t mds = s->mds_num;
size_t caps_size = s->caps.size();
- ldout(cct, 10) << "trim_caps mds." << mds << " max " << max
+ ldout(cct, 10) << __func__ << " mds." << mds << " max " << max
<< " caps " << caps_size << dendl;
uint64_t trimmed = 0;
* looking at from getting deleted during traversal. */
while ((caps_size - trimmed) > max && !p.end()) {
Cap *cap = *p;
- InodeRef in(cap->inode);
+ InodeRef in(&cap->inode);
// Increment p early because it will be invalidated if cap
// is deleted inside remove_cap
ldout(cct, 20) << " trying to trim dentries for " << *in << dendl;
_trim_negative_child_dentries(in);
bool all = true;
- set<Dentry*>::iterator q = in->dn_set.begin();
- while (q != in->dn_set.end()) {
- Dentry *dn = *q++;
+ auto q = in->dentries.begin();
+ while (q != in->dentries.end()) {
+ Dentry *dn = *q;
+ ++q;
if (dn->lru_is_expireable()) {
if (can_invalidate_dentries &&
- dn->dir->parent_inode->ino == MDS_INO_ROOT) {
+ dn->dir->parent_inode->ino == CEPH_INO_ROOT) {
// Only issue one of these per DN for inodes in root: handle
// others more efficiently by calling for root-child DNs at
// the end of this function.
all = false;
}
}
- if (all && in->ino != MDS_INO_ROOT) {
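+ // we hold the only remaining ll reference; ask the upper layer (e.g. a
+ // FUSE mount) to release the inode so its cap can really be trimmed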
+ if (in->ll_ref == 1 && in->ino != CEPH_INO_ROOT) {
+ _schedule_ino_release_callback(in.get());
+ }
+ if (all && in->ino != CEPH_INO_ROOT) {
ldout(cct, 20) << __func__ << " counting as trimmed: " << *in << dendl;
trimmed++;
}
to_trim.clear();
caps_size = s->caps.size();
- if (caps_size > max)
+ if (caps_size > (size_t)max)
_invalidate_kernel_dcache();
}
{
s->readonly = true;
for (xlist<Cap*>::iterator p = s->caps.begin(); !p.end(); ++p) {
- Inode *in = (*p)->inode;
- if (in->caps_wanted() & CEPH_CAP_FILE_WR)
- signal_cond_list(in->waitfor_caps);
+ auto &in = (*p)->inode;
+ if (in.caps_wanted() & CEPH_CAP_FILE_WR)
+ signal_cond_list(in.waitfor_caps);
}
}
MetaSession *session = in->auth_cap->session;
int flushing = in->dirty_caps;
- assert(flushing);
+ ceph_assert(flushing);
ceph_tid_t flush_tid = ++last_flush_tid;
in->flushing_cap_tids[flush_tid] = flushing;
if (!in->flushing_caps) {
- ldout(cct, 10) << "mark_caps_flushing " << ccap_string(flushing) << " " << *in << dendl;
+ ldout(cct, 10) << __func__ << " " << ccap_string(flushing) << " " << *in << dendl;
num_flushing_caps++;
} else {
- ldout(cct, 10) << "mark_caps_flushing (more) " << ccap_string(flushing) << " " << *in << dendl;
+ ldout(cct, 10) << __func__ << " (more) " << ccap_string(flushing) << " " << *in << dendl;
}
in->flushing_caps |= flushing;
}
}
-void Client::flush_caps(Inode *in, MetaSession *session, bool sync)
-{
- ldout(cct, 10) << "flush_caps " << in << " mds." << session->mds_num << dendl;
- Cap *cap = in->auth_cap;
- assert(cap->session == session);
-
- for (map<ceph_tid_t,int>::iterator p = in->flushing_cap_tids.begin();
- p != in->flushing_cap_tids.end();
- ++p) {
- bool req_sync = false;
-
- /* If this is a synchronous request, then flush the journal on last one */
- if (sync && (p->first == in->flushing_cap_tids.rbegin()->first))
- req_sync = true;
-
- send_cap(in, session, cap, req_sync,
- (get_caps_used(in) | in->caps_dirty()),
- in->caps_wanted(), (cap->issued | cap->implemented),
- p->second, p->first);
- }
-}
-
void Client::wait_sync_caps(Inode *in, ceph_tid_t want)
{
while (in->flushing_caps) {
map<ceph_tid_t, int>::iterator it = in->flushing_cap_tids.begin();
- assert(it != in->flushing_cap_tids.end());
+ ceph_assert(it != in->flushing_cap_tids.end());
if (it->first > want)
break;
- ldout(cct, 10) << "wait_sync_caps on " << *in << " flushing "
+ ldout(cct, 10) << __func__ << " on " << *in << " flushing "
<< ccap_string(it->second) << " want " << want
<< " last " << it->first << dendl;
wait_on_list(in->waitfor_caps);
void Client::wait_sync_caps(ceph_tid_t want)
{
retry:
- ldout(cct, 10) << "wait_sync_caps want " << want << " (last is " << last_flush_tid << ", "
+ ldout(cct, 10) << __func__ << " want " << want << " (last is " << last_flush_tid << ", "
<< num_flushing_caps << " total flushing)" << dendl;
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p) {
- MetaSession *s = p->second;
+ for (auto &p : mds_sessions) {
+ MetaSession *s = &p.second;
if (s->flushing_caps_tids.empty())
continue;
ceph_tid_t oldest_tid = *s->flushing_caps_tids.begin();
if (oldest_tid <= want) {
- ldout(cct, 10) << " waiting on mds." << p->first << " tid " << oldest_tid
+ ldout(cct, 10) << " waiting on mds." << p.first << " tid " << oldest_tid
<< " (want " << want << ")" << dendl;
- sync_cond.Wait(client_lock);
+ std::unique_lock l{client_lock, std::adopt_lock};
+ sync_cond.wait(l);
+ l.release();
goto retry;
}
}
}
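+// re-send one inode's outstanding cap flushes and snap flushes in their
+// original flush_tid order; entries with a zero value in flushing_cap_tids
+// are snap flushes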
+void Client::kick_flushing_caps(Inode *in, MetaSession *session)
+{
+ in->flags &= ~I_KICK_FLUSH;
+
+ Cap *cap = in->auth_cap;
+ ceph_assert(cap->session == session);
+
+ ceph_tid_t last_snap_flush = 0;
+ for (auto p = in->flushing_cap_tids.rbegin();
+ p != in->flushing_cap_tids.rend();
+ ++p) {
+ if (!p->second) {
+ last_snap_flush = p->first;
+ break;
+ }
+ }
+
+ int wanted = in->caps_wanted();
+ int used = get_caps_used(in) | in->caps_dirty();
+ auto it = in->cap_snaps.begin();
+ for (auto& p : in->flushing_cap_tids) {
+ if (p.second) {
+ int msg_flags = p.first < last_snap_flush ? MClientCaps::FLAG_PENDING_CAPSNAP : 0;
+ send_cap(in, session, cap, msg_flags, used, wanted, (cap->issued | cap->implemented),
+ p.second, p.first);
+ } else {
+ ceph_assert(it != in->cap_snaps.end());
+ ceph_assert(it->second.flush_tid == p.first);
+ send_flush_snap(in, session, it->first, it->second);
+ ++it;
+ }
+ }
+}
+
void Client::kick_flushing_caps(MetaSession *session)
{
mds_rank_t mds = session->mds_num;
- ldout(cct, 10) << "kick_flushing_caps mds." << mds << dendl;
+ ldout(cct, 10) << __func__ << " mds." << mds << dendl;
for (xlist<Inode*>::iterator p = session->flushing_caps.begin(); !p.end(); ++p) {
Inode *in = *p;
- if (session->early_flushing_caps.count(in))
- continue;
- ldout(cct, 20) << " reflushing caps on " << *in << " to mds." << mds << dendl;
- if (in->cap_snaps.size())
- flush_snaps(in, true);
- if (in->flushing_caps)
- flush_caps(in, session);
+ if (in->flags & I_KICK_FLUSH) {
+ ldout(cct, 20) << " reflushing caps on " << *in << " to mds." << mds << dendl;
+ kick_flushing_caps(in, session);
+ }
}
-
- session->early_flushing_caps.clear();
}
void Client::early_kick_flushing_caps(MetaSession *session)
{
- session->early_flushing_caps.clear();
-
for (xlist<Inode*>::iterator p = session->flushing_caps.begin(); !p.end(); ++p) {
Inode *in = *p;
- assert(in->auth_cap);
+ Cap *cap = in->auth_cap;
+ ceph_assert(cap);
// if flushing caps were revoked, we re-send the cap flush in client reconnect
// stage. This guarantees that MDS processes the cap flush message before issuing
// the flushing caps to other client.
- if ((in->flushing_caps & in->auth_cap->issued) == in->flushing_caps)
+ if ((in->flushing_caps & in->auth_cap->issued) == in->flushing_caps) {
+ in->flags |= I_KICK_FLUSH;
continue;
+ }
ldout(cct, 20) << " reflushing caps (early_kick) on " << *in
<< " to mds." << session->mds_num << dendl;
+ // send_reconnect() also will reset these sequence numbers. make sure
+ // sequence numbers in cap flush message match later reconnect message.
+ cap->seq = 0;
+ cap->issue_seq = 0;
+ cap->mseq = 0;
+ cap->issued = cap->implemented;
- session->early_flushing_caps.insert(in);
-
- if (in->cap_snaps.size())
- flush_snaps(in, true);
- if (in->flushing_caps)
- flush_caps(in, session);
-
- }
-}
-
-void Client::kick_maxsize_requests(MetaSession *session)
-{
- xlist<Cap*>::iterator iter = session->caps.begin();
- while (!iter.end()){
- (*iter)->inode->requested_max_size = 0;
- (*iter)->inode->wanted_max_size = 0;
- signal_cond_list((*iter)->inode->waitfor_caps);
- ++iter;
+ kick_flushing_caps(in, session);
}
}
realm = q.front();
q.pop_front();
- ldout(cct, 10) << "invalidate_snaprealm_and_children " << *realm << dendl;
+ ldout(cct, 10) << __func__ << " " << *realm << dendl;
realm->invalidate_cache();
for (set<SnapRealm*>::iterator p = realm->pchildren.begin();
SnapRealm *realm = snap_realms[r];
if (!realm)
snap_realms[r] = realm = new SnapRealm(r);
- ldout(cct, 20) << "get_snap_realm " << r << " " << realm << " " << realm->nref << " -> " << (realm->nref + 1) << dendl;
+ ldout(cct, 20) << __func__ << " " << r << " " << realm << " " << realm->nref << " -> " << (realm->nref + 1) << dendl;
realm->nref++;
return realm;
}
SnapRealm *Client::get_snap_realm_maybe(inodeno_t r)
{
if (snap_realms.count(r) == 0) {
- ldout(cct, 20) << "get_snap_realm_maybe " << r << " fail" << dendl;
+ ldout(cct, 20) << __func__ << " " << r << " fail" << dendl;
return NULL;
}
SnapRealm *realm = snap_realms[r];
- ldout(cct, 20) << "get_snap_realm_maybe " << r << " " << realm << " " << realm->nref << " -> " << (realm->nref + 1) << dendl;
+ ldout(cct, 20) << __func__ << " " << r << " " << realm << " " << realm->nref << " -> " << (realm->nref + 1) << dendl;
realm->nref++;
return realm;
}
void Client::put_snap_realm(SnapRealm *realm)
{
- ldout(cct, 20) << "put_snap_realm " << realm->ino << " " << realm
+ ldout(cct, 20) << __func__ << " " << realm->ino << " " << realm
<< " " << realm->nref << " -> " << (realm->nref - 1) << dendl;
if (--realm->nref == 0) {
snap_realms.erase(realm->ino);
bool Client::adjust_realm_parent(SnapRealm *realm, inodeno_t parent)
{
if (realm->parent != parent) {
- ldout(cct, 10) << "adjust_realm_parent " << *realm
+ ldout(cct, 10) << __func__ << " " << *realm
<< " " << realm->parent << " -> " << parent << dendl;
realm->parent = parent;
if (realm->pparent) {
}
-void Client::update_snap_trace(bufferlist& bl, SnapRealm **realm_ret, bool flush)
+void Client::update_snap_trace(const bufferlist& bl, SnapRealm **realm_ret, bool flush)
{
SnapRealm *first_realm = NULL;
- ldout(cct, 10) << "update_snap_trace len " << bl.length() << dendl;
+ ldout(cct, 10) << __func__ << " len " << bl.length() << dendl;
map<SnapRealm*, SnapContext> dirty_realms;
- bufferlist::iterator p = bl.begin();
+ auto p = bl.cbegin();
while (!p.end()) {
SnapRealmInfo info;
- ::decode(info, p);
+ decode(info, p);
SnapRealm *realm = get_snap_realm(info.ino());
bool invalidate = false;
if (info.seq() > realm->seq) {
- ldout(cct, 10) << "update_snap_trace " << *realm << " seq " << info.seq() << " > " << realm->seq
+ ldout(cct, 10) << __func__ << " " << *realm << " seq " << info.seq() << " > " << realm->seq
<< dendl;
if (flush) {
if (invalidate) {
invalidate_snaprealm_and_children(realm);
- ldout(cct, 15) << "update_snap_trace " << *realm << " self|parent updated" << dendl;
+ ldout(cct, 15) << __func__ << " " << *realm << " self|parent updated" << dendl;
ldout(cct, 15) << " snapc " << realm->get_snap_context() << dendl;
} else {
- ldout(cct, 10) << "update_snap_trace " << *realm << " seq " << info.seq()
+ ldout(cct, 10) << __func__ << " " << *realm << " seq " << info.seq()
<< " <= " << realm->seq << " and same parent, SKIPPING" << dendl;
}
-
+
if (!first_realm)
first_realm = realm;
else
put_snap_realm(realm);
}
- for (map<SnapRealm*, SnapContext>::iterator q = dirty_realms.begin();
- q != dirty_realms.end();
- ++q) {
- SnapRealm *realm = q->first;
+ for (auto &[realm, snapc] : dirty_realms) {
// are there new snaps?
- if (has_new_snaps(q->second, realm->get_snap_context())) {
+ if (has_new_snaps(snapc, realm->get_snap_context())) {
ldout(cct, 10) << " flushing caps on " << *realm << dendl;
- xlist<Inode*>::iterator r = realm->inodes_with_caps.begin();
- while (!r.end()) {
- Inode *in = *r;
- ++r;
- queue_cap_snap(in, q->second);
+ for (auto&& in : realm->inodes_with_caps) {
+ queue_cap_snap(in, snapc);
}
} else {
ldout(cct, 10) << " no new snap on " << *realm << dendl;
put_snap_realm(first_realm);
}
-void Client::handle_snap(MClientSnap *m)
+void Client::handle_snap(const MConstRef<MClientSnap>& m)
{
- ldout(cct, 10) << "handle_snap " << *m << dendl;
+ ldout(cct, 10) << __func__ << " " << *m << dendl;
mds_rank_t mds = mds_rank_t(m->get_source().num());
MetaSession *session = _get_mds_session(mds, m->get_connection().get());
if (!session) {
- m->put();
return;
}
SnapRealm *realm = 0;
if (m->head.op == CEPH_SNAP_OP_SPLIT) {
- assert(m->head.split);
+ ceph_assert(m->head.split);
SnapRealmInfo info;
- bufferlist::iterator p = m->bl.begin();
- ::decode(info, p);
- assert(info.ino() == m->head.split);
+ auto p = m->bl.cbegin();
+ decode(info, p);
+ ceph_assert(info.ino() == m->head.split);
// flush, then move, the inos.
realm = get_snap_realm(info.ino());
ldout(cct, 10) << " splitting off " << *realm << dendl;
- for (vector<inodeno_t>::iterator p = m->split_inos.begin();
- p != m->split_inos.end();
- ++p) {
- vinodeno_t vino(*p, CEPH_NOSNAP);
+ for (auto& ino : m->split_inos) {
+ vinodeno_t vino(ino, CEPH_NOSNAP);
if (inode_map.count(vino)) {
Inode *in = inode_map[vino];
if (!in->snaprealm || in->snaprealm == realm)
}
// move child snaprealms, too
- for (vector<inodeno_t>::iterator p = m->split_realms.begin();
- p != m->split_realms.end();
- ++p) {
- ldout(cct, 10) << "adjusting snaprealm " << *p << " parent" << dendl;
- SnapRealm *child = get_snap_realm_maybe(*p);
+ for (auto& child_realm : m->split_realms) {
+ ldout(cct, 10) << "adjusting snaprealm " << child_realm << " parent" << dendl;
+ SnapRealm *child = get_snap_realm_maybe(child_realm);
if (!child)
continue;
adjust_realm_parent(child, realm->ino);
}
put_snap_realm(realm);
}
-
- m->put();
}
-void Client::handle_quota(MClientQuota *m)
+void Client::handle_quota(const MConstRef<MClientQuota>& m)
{
mds_rank_t mds = mds_rank_t(m->get_source().num());
MetaSession *session = _get_mds_session(mds, m->get_connection().get());
if (!session) {
- m->put();
return;
}
got_mds_push(session);
- ldout(cct, 10) << "handle_quota " << *m << " from mds." << mds << dendl;
+ ldout(cct, 10) << __func__ << " " << *m << " from mds." << mds << dendl;
vinodeno_t vino(m->ino, CEPH_NOSNAP);
if (inode_map.count(vino)) {
in->rstat = m->rstat;
}
}
-
- m->put();
}
-void Client::handle_caps(MClientCaps *m)
+void Client::handle_caps(const MConstRef<MClientCaps>& m)
{
mds_rank_t mds = mds_rank_t(m->get_source().num());
MetaSession *session = _get_mds_session(mds, m->get_connection().get());
if (!session) {
- m->put();
return;
}
got_mds_push(session);
- m->clear_payload(); // for if/when we send back to MDS
-
- Inode *in = 0;
+ Inode *in;
vinodeno_t vino(m->get_ino(), CEPH_NOSNAP);
- if (inode_map.count(vino))
- in = inode_map[vino];
- if (!in) {
+ if (auto it = inode_map.find(vino); it != inode_map.end()) {
+ in = it->second;
+ } else {
if (m->get_op() == CEPH_CAP_OP_IMPORT) {
- ldout(cct, 5) << "handle_caps don't have vino " << vino << " on IMPORT, immediately releasing" << dendl;
+ ldout(cct, 5) << __func__ << " don't have vino " << vino << " on IMPORT, immediately releasing" << dendl;
session->enqueue_cap_release(
m->get_ino(),
m->get_cap_id(),
m->get_mseq(),
cap_epoch_barrier);
} else {
- ldout(cct, 5) << "handle_caps don't have vino " << vino << ", dropping" << dendl;
+ ldout(cct, 5) << __func__ << " don't have vino " << vino << ", dropping" << dendl;
}
- m->put();
// in case the mds is waiting on e.g. a revocation
flush_cap_releases();
}
switch (m->get_op()) {
- case CEPH_CAP_OP_EXPORT:
- return handle_cap_export(session, in, m);
- case CEPH_CAP_OP_FLUSHSNAP_ACK:
- return handle_cap_flushsnap_ack(session, in, m);
- case CEPH_CAP_OP_IMPORT:
- handle_cap_import(session, in, m);
+ case CEPH_CAP_OP_EXPORT: return handle_cap_export(session, in, m);
+ case CEPH_CAP_OP_FLUSHSNAP_ACK: return handle_cap_flushsnap_ack(session, in, m);
+ case CEPH_CAP_OP_IMPORT: /* no return */ handle_cap_import(session, in, m);
}
- if (in->caps.count(mds) == 0) {
- ldout(cct, 5) << "handle_caps don't have " << *in << " cap on mds." << mds << dendl;
- m->put();
- return;
- }
-
- Cap *cap = in->caps[mds];
+ if (auto it = in->caps.find(mds); it != in->caps.end()) {
+ Cap &cap = it->second; // reuse the iterator instead of a second map lookup
- switch (m->get_op()) {
- case CEPH_CAP_OP_TRUNC: return handle_cap_trunc(session, in, m);
- case CEPH_CAP_OP_IMPORT:
- case CEPH_CAP_OP_REVOKE:
- case CEPH_CAP_OP_GRANT: return handle_cap_grant(session, in, cap, m);
- case CEPH_CAP_OP_FLUSH_ACK: return handle_cap_flush_ack(session, in, cap, m);
- default:
- m->put();
+ switch (m->get_op()) {
+ case CEPH_CAP_OP_TRUNC: return handle_cap_trunc(session, in, m);
+ case CEPH_CAP_OP_IMPORT:
+ case CEPH_CAP_OP_REVOKE:
+ case CEPH_CAP_OP_GRANT: return handle_cap_grant(session, in, &cap, m);
+ case CEPH_CAP_OP_FLUSH_ACK: return handle_cap_flush_ack(session, in, &cap, m);
+ }
+ } else {
+ ldout(cct, 5) << __func__ << " don't have " << *in << " cap on mds." << mds << dendl;
+ return;
}
}
-void Client::handle_cap_import(MetaSession *session, Inode *in, MClientCaps *m)
+void Client::handle_cap_import(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m)
{
mds_rank_t mds = session->mds_num;
- ldout(cct, 5) << "handle_cap_import ino " << m->get_ino() << " mseq " << m->get_mseq()
+ ldout(cct, 5) << __func__ << " ino " << m->get_ino() << " mseq " << m->get_mseq()
<< " IMPORT from mds." << mds << dendl;
const mds_rank_t peer_mds = mds_rank_t(m->peer.mds);
Cap *cap = NULL;
UserPerm cap_perms;
- if (m->peer.cap_id && in->caps.count(peer_mds)) {
- cap = in->caps[peer_mds];
- if (cap) {
- cap_perms = cap->latest_perms;
- }
+ if (auto it = in->caps.find(peer_mds); m->peer.cap_id && it != in->caps.end()) {
+ cap = &it->second;
+ cap_perms = cap->latest_perms;
}
// add/update it
SnapRealm *realm = NULL;
update_snap_trace(m->snapbl, &realm);
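+ // capture caps/wanted from the message up front; 'wanted' is consulted
+ // again below when deciding whether to reset requested_max_size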
+ int issued = m->get_caps();
+ int wanted = m->get_wanted();
add_update_cap(in, session, m->get_cap_id(),
- m->get_caps(), m->get_seq(), m->get_mseq(), m->get_realm(),
- CEPH_CAP_FLAG_AUTH, cap_perms);
+ issued, wanted, m->get_seq(), m->get_mseq(),
+ m->get_realm(), CEPH_CAP_FLAG_AUTH, cap_perms);
if (cap && cap->cap_id == m->peer.cap_id) {
remove_cap(cap, (m->peer.flags & CEPH_CAP_FLAG_RELEASE));
if (realm)
put_snap_realm(realm);
- if (in->auth_cap && in->auth_cap->session->mds_num == mds) {
+ if (in->auth_cap && in->auth_cap->session == session) {
+ if (!(wanted & CEPH_CAP_ANY_FILE_WR) ||
+ in->requested_max_size > m->get_max_size()) {
+ in->requested_max_size = 0;
+ ldout(cct, 15) << "reset requested_max_size after cap import" << dendl;
+ }
// reflush any/all caps (if we are now the auth_cap)
- if (in->cap_snaps.size())
- flush_snaps(in, true);
- if (in->flushing_caps)
- flush_caps(in, session);
+ kick_flushing_caps(in, session);
}
}
-void Client::handle_cap_export(MetaSession *session, Inode *in, MClientCaps *m)
+void Client::handle_cap_export(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m)
{
mds_rank_t mds = session->mds_num;
- ldout(cct, 5) << "handle_cap_export ino " << m->get_ino() << " mseq " << m->get_mseq()
+ ldout(cct, 5) << __func__ << " ino " << m->get_ino() << " mseq " << m->get_mseq()
<< " EXPORT from mds." << mds << dendl;
- Cap *cap = NULL;
- if (in->caps.count(mds))
- cap = in->caps[mds];
-
- const mds_rank_t peer_mds = mds_rank_t(m->peer.mds);
-
- if (cap && cap->cap_id == m->get_cap_id()) {
- if (m->peer.cap_id) {
- MetaSession *tsession = _get_or_open_mds_session(peer_mds);
- if (in->caps.count(peer_mds)) {
- Cap *tcap = in->caps[peer_mds];
- if (tcap->cap_id == m->peer.cap_id &&
- ceph_seq_cmp(tcap->seq, m->peer.seq) < 0) {
- tcap->cap_id = m->peer.cap_id;
- tcap->seq = m->peer.seq - 1;
- tcap->issue_seq = tcap->seq;
- tcap->mseq = m->peer.mseq;
- tcap->issued |= cap->issued;
- tcap->implemented |= cap->issued;
- if (cap == in->auth_cap)
- in->auth_cap = tcap;
- if (in->auth_cap == tcap && in->flushing_cap_item.is_on_list())
- adjust_session_flushing_caps(in, session, tsession);
- }
+ auto it = in->caps.find(mds);
+ if (it != in->caps.end()) {
+ Cap &cap = it->second;
+ if (cap.cap_id == m->get_cap_id()) {
+ if (m->peer.cap_id) {
+ const auto peer_mds = mds_rank_t(m->peer.mds);
+ MetaSession *tsession = _get_or_open_mds_session(peer_mds);
+ auto it = in->caps.find(peer_mds);
+ if (it != in->caps.end()) {
+ Cap &tcap = it->second;
+ if (tcap.cap_id == m->peer.cap_id &&
+ ceph_seq_cmp(tcap.seq, m->peer.seq) < 0) {
+ tcap.cap_id = m->peer.cap_id;
+ tcap.seq = m->peer.seq - 1;
+ tcap.issue_seq = tcap.seq;
+ tcap.issued |= cap.issued;
+ tcap.implemented |= cap.issued;
+ if (&cap == in->auth_cap)
+ in->auth_cap = &tcap;
+ if (in->auth_cap == &tcap && in->flushing_cap_item.is_on_list())
+ adjust_session_flushing_caps(in, session, tsession);
+ }
+ } else {
+ add_update_cap(in, tsession, m->peer.cap_id, cap.issued, 0,
+ m->peer.seq - 1, m->peer.mseq, (uint64_t)-1,
+ &cap == in->auth_cap ? CEPH_CAP_FLAG_AUTH : 0,
+ cap.latest_perms);
+ }
} else {
- add_update_cap(in, tsession, m->peer.cap_id, cap->issued,
- m->peer.seq - 1, m->peer.mseq, (uint64_t)-1,
- cap == in->auth_cap ? CEPH_CAP_FLAG_AUTH : 0,
- cap->latest_perms);
+ if (cap.wanted | cap.issued)
+ in->flags |= I_CAP_DROPPED;
}
- } else {
- if (cap == in->auth_cap)
- in->flags |= I_CAP_DROPPED;
- }
- remove_cap(cap, false);
+ remove_cap(&cap, false);
+ }
}
-
- m->put();
}
-void Client::handle_cap_trunc(MetaSession *session, Inode *in, MClientCaps *m)
+void Client::handle_cap_trunc(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m)
{
mds_rank_t mds = session->mds_num;
- assert(in->caps[mds]);
+ ceph_assert(in->caps.count(mds));
- ldout(cct, 10) << "handle_cap_trunc on ino " << *in
+ ldout(cct, 10) << __func__ << " on ino " << *in
<< " size " << in->size << " -> " << m->get_size()
<< dendl;
issued |= in->caps_dirty();
update_inode_file_size(in, issued, m->get_size(),
m->get_truncate_seq(), m->get_truncate_size());
- m->put();
}
-void Client::handle_cap_flush_ack(MetaSession *session, Inode *in, Cap *cap, MClientCaps *m)
+void Client::handle_cap_flush_ack(MetaSession *session, Inode *in, Cap *cap, const MConstRef<MClientCaps>& m)
{
ceph_tid_t flush_ack_tid = m->get_client_tid();
int dirty = m->get_dirty();
int cleaned = 0;
int flushed = 0;
- for (map<ceph_tid_t, int>::iterator it = in->flushing_cap_tids.begin();
- it != in->flushing_cap_tids.end(); ) {
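+ // flushing_cap_tids is an ordered map, so begin() is the oldest tid still
+ // in flight; a flush ack implies at least one entry is pending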
+ auto it = in->flushing_cap_tids.begin();
+ if (it->first < flush_ack_tid) {
+ ldout(cct, 0) << __func__ << " mds." << session->mds_num
+ << " got unexpected flush ack tid " << flush_ack_tid
+ << " expected is " << it->first << dendl;
+ }
+ for (; it != in->flushing_cap_tids.end(); ) {
+ if (!it->second) {
+ // cap snap
+ ++it;
+ continue;
+ }
if (it->first == flush_ack_tid)
cleaned = it->second;
if (it->first <= flush_ack_tid) {
++it;
}
- ldout(cct, 5) << "handle_cap_flush_ack mds." << session->mds_num
+ ldout(cct, 5) << __func__ << " mds." << session->mds_num
<< " cleaned " << ccap_string(cleaned) << " on " << *in
<< " with " << ccap_string(dirty) << dendl;
signal_cond_list(in->waitfor_caps);
if (session->flushing_caps_tids.empty() ||
*session->flushing_caps_tids.begin() > flush_ack_tid)
- sync_cond.Signal();
+ sync_cond.notify_all();
}
if (!dirty) {
if (in->flushing_caps == 0) {
ldout(cct, 10) << " " << *in << " !flushing" << dendl;
num_flushing_caps--;
- if (in->cap_snaps.empty())
+ if (in->flushing_cap_tids.empty())
in->flushing_cap_item.remove_myself();
}
if (!in->caps_dirty())
put_inode(in);
}
}
-
- m->put();
}
-void Client::handle_cap_flushsnap_ack(MetaSession *session, Inode *in, MClientCaps *m)
+void Client::handle_cap_flushsnap_ack(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m)
{
+ ceph_tid_t flush_ack_tid = m->get_client_tid();
mds_rank_t mds = session->mds_num;
- assert(in->caps[mds]);
+ ceph_assert(in->caps.count(mds));
snapid_t follows = m->get_snap_follows();
- if (in->cap_snaps.count(follows)) {
- CapSnap &capsnap = in->cap_snaps.at(follows);
- if (m->get_client_tid() != capsnap.flush_tid) {
- ldout(cct, 10) << " tid " << m->get_client_tid() << " != " << capsnap.flush_tid << dendl;
+ if (auto it = in->cap_snaps.find(follows); it != in->cap_snaps.end()) {
+ auto& capsnap = it->second;
+ if (flush_ack_tid != capsnap.flush_tid) {
+ ldout(cct, 10) << " tid " << flush_ack_tid << " != " << capsnap.flush_tid << dendl;
} else {
- ldout(cct, 5) << "handle_cap_flushedsnap mds." << mds << " flushed snap follows " << follows
+ InodeRef tmp_ref(in);
+ ldout(cct, 5) << __func__ << " mds." << mds << " flushed snap follows " << follows
<< " on " << *in << dendl;
- InodeRef tmp_ref;
- if (in->get_num_ref() == 1)
- tmp_ref = in; // make sure inode not get freed while erasing item from in->cap_snaps
- if (in->flushing_caps == 0 && in->cap_snaps.empty())
- in->flushing_cap_item.remove_myself();
session->flushing_caps_tids.erase(capsnap.flush_tid);
- in->cap_snaps.erase(follows);
+ in->flushing_cap_tids.erase(capsnap.flush_tid);
+ if (in->flushing_caps == 0 && in->flushing_cap_tids.empty())
+ in->flushing_cap_item.remove_myself();
+ in->cap_snaps.erase(it);
+
+ signal_cond_list(in->waitfor_caps);
+ if (session->flushing_caps_tids.empty() ||
+ *session->flushing_caps_tids.begin() > flush_ack_tid)
+ sync_cond.notify_all();
}
} else {
- ldout(cct, 5) << "handle_cap_flushedsnap DUP(?) mds." << mds << " flushed snap follows " << follows
+ ldout(cct, 5) << __func__ << " DUP(?) mds." << mds << " flushed snap follows " << follows
<< " on " << *in << dendl;
// we may not have it if we sent multiple FLUSHSNAP requests and got multiple FLUSHEDSNAPs back
}
-
- m->put();
}
class C_Client_DentryInvalidate : public Context {
}
void finish(int r) override {
// _async_dentry_invalidate is responsible for its own locking
- assert(!client->client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_not_locked_by_me(client->client_lock));
client->_async_dentry_invalidate(dirino, ino, name);
}
};
{
if (unmounting)
return;
- ldout(cct, 10) << "_async_dentry_invalidate '" << name << "' ino " << ino
+ ldout(cct, 10) << __func__ << " '" << name << "' ino " << ino
<< " in dir " << dirino << dendl;
- dentry_invalidate_cb(callback_handle, dirino, ino, name);
+ dentry_invalidate_cb(callback_handle, dirino, ino, name.c_str(), name.length());
}
void Client::_schedule_invalidate_dentry_callback(Dentry *dn, bool del)
void Client::_try_to_trim_inode(Inode *in, bool sched_inval)
{
int ref = in->get_num_ref();
+ ldout(cct, 5) << __func__ << " in " << *in <<dendl;
if (in->dir && !in->dir->dentries.empty()) {
for (auto p = in->dir->dentries.begin();
--ref;
}
- if (ref > 0 && in->ll_ref > 0 && sched_inval) {
- set<Dentry*>::iterator q = in->dn_set.begin();
- while (q != in->dn_set.end()) {
- Dentry *dn = *q++;
- // FIXME: we play lots of unlink/link tricks when handling MDS replies,
- // so in->dn_set doesn't always reflect the state of kernel's dcache.
- _schedule_invalidate_dentry_callback(dn, true);
+ if (ref > 0) {
+ auto q = in->dentries.begin();
+ while (q != in->dentries.end()) {
+ Dentry *dn = *q;
+ ++q;
+ if (in->ll_ref > 0 && sched_inval) {
+ // FIXME: we play lots of unlink/link tricks when handling MDS replies,
+ // so in->dentries doesn't always reflect the state of kernel's dcache.
+ _schedule_invalidate_dentry_callback(dn, true);
+ }
unlink(dn, true, true);
}
}
}
-void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, MClientCaps *m)
+void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const MConstRef<MClientCaps>& m)
{
mds_rank_t mds = session->mds_num;
int used = get_caps_used(in);
int wanted = in->caps_wanted();
- const int old_caps = cap->issued;
- const int new_caps = m->get_caps();
- ldout(cct, 5) << "handle_cap_grant on in " << m->get_ino()
+ const unsigned new_caps = m->get_caps();
+ const bool was_stale = session->cap_gen > cap->gen;
+ ldout(cct, 5) << __func__ << " on in " << m->get_ino()
<< " mds." << mds << " seq " << m->get_seq()
<< " caps now " << ccap_string(new_caps)
- << " was " << ccap_string(old_caps) << dendl;
+ << " was " << ccap_string(cap->issued)
+ << (was_stale ? " (stale)" : "") << dendl;
+
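+ // a stale session means the MDS may have already revoked everything;
+ // treat only the bare PIN cap as still held until this message re-issues caps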
+ if (was_stale)
+ cap->issued = cap->implemented = CEPH_CAP_PIN;
cap->seq = m->get_seq();
cap->gen = session->cap_gen;
+ check_cap_issue(in, new_caps);
+
// update inode
int issued;
in->caps_issued(&issued);
if (!(issued & CEPH_CAP_XATTR_EXCL) &&
m->xattrbl.length() &&
m->head.xattr_version > in->xattr_version) {
- bufferlist::iterator p = m->xattrbl.begin();
- ::decode(in->xattrs, p);
+ auto p = m->xattrbl.cbegin();
+ decode(in->xattrs, p);
in->xattr_version = m->head.xattr_version;
}
}
bool check = false;
- if (m->get_op() == CEPH_CAP_OP_IMPORT && m->get_wanted() != wanted)
+ if ((was_stale || m->get_op() == CEPH_CAP_OP_IMPORT) &&
+ (wanted & ~(cap->wanted | new_caps))) {
+ // If the mds is importing the cap, prior cap messages that update
+ // 'wanted' may get dropped by the mds (migrate seq mismatch).
+ //
+ // We don't send a cap message to update 'wanted' if what we want is
+ // already issued. If the mds revokes caps, the cap message that releases
+ // them also tells the mds what we want. But if caps were forcibly revoked
+ // by the mds (stale session), we may not have told the mds what we want.
check = true;
+ }
- check_cap_issue(in, cap, new_caps);
// update caps
- int revoked = old_caps & ~new_caps;
+ auto revoked = cap->issued & ~new_caps;
if (revoked) {
ldout(cct, 10) << " revocation of " << ccap_string(revoked) << dendl;
cap->issued = new_caps;
else if (revoked & ceph_deleg_caps_for_type(CEPH_DELEGATION_WR))
in->recall_deleg(true);
- if ((used & revoked & CEPH_CAP_FILE_BUFFER) &&
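+ // reconcile CACHE/BUFFER usage with the LAZYIO cap so the revocation
+ // checks below account for lazyio users as well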
+ used = adjust_caps_used_for_lazyio(used, cap->issued, cap->implemented);
+ if ((used & revoked & (CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO)) &&
!_flush(in, new C_Client_FlushComplete(this, in))) {
// waitin' for flush
- } else if (revoked & CEPH_CAP_FILE_CACHE) {
+ } else if (used & revoked & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) {
if (_release(in))
check = true;
} else {
cap->wanted = 0; // don't let check_caps skip sending a response to MDS
check = true;
}
- } else if (old_caps == new_caps) {
- ldout(cct, 10) << " caps unchanged at " << ccap_string(old_caps) << dendl;
+ } else if (cap->issued == new_caps) {
+ ldout(cct, 10) << " caps unchanged at " << ccap_string(cap->issued) << dendl;
} else {
- ldout(cct, 10) << " grant, new caps are " << ccap_string(new_caps & ~old_caps) << dendl;
+ ldout(cct, 10) << " grant, new caps are " << ccap_string(new_caps & ~cap->issued) << dendl;
cap->issued = new_caps;
cap->implemented |= new_caps;
if (cap == in->auth_cap) {
// is a non-auth MDS revoking the newly granted caps?
- for (map<mds_rank_t, Cap*>::iterator it = in->caps.begin(); it != in->caps.end(); ++it) {
- if (it->second == cap)
+ for (const auto &p : in->caps) {
+ if (&p.second == cap)
continue;
- if (it->second->implemented & ~it->second->issued & new_caps) {
+ if (p.second.implemented & ~p.second.issued & new_caps) {
check = true;
break;
}
// may drop inode's last ref
if (deleted_inode)
_try_to_trim_inode(in, true);
-
- m->put();
}
int Client::inode_permission(Inode *in, const UserPerm& perms, unsigned want)
return vinodeno_t(in->ino, in->snapid);
}
-inodeno_t Client::_get_inodeno(Inode *in)
-{
- /* The caller must hold the client lock */
- return in->ino;
-}
-
-
/**
* Resolve an MDS spec to a list of MDS daemon GIDs.
*
const std::string &mds_spec,
std::vector<mds_gid_t> *targets)
{
- assert(fsmap);
- assert(targets != nullptr);
+ ceph_assert(fsmap);
+ ceph_assert(targets != nullptr);
mds_role_t role;
std::stringstream ss;
*/
int Client::authenticate()
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
if (monclient->is_authenticated()) {
return 0;
}
- client_lock.Unlock();
+ client_lock.unlock();
int r = monclient->authenticate(cct->_conf->client_mount_timeout);
- client_lock.Lock();
+ client_lock.lock();
if (r < 0) {
return r;
}
do {
C_SaferCond cond;
monclient->get_version("fsmap", &fsmap_latest, NULL, &cond);
- client_lock.Unlock();
+ client_lock.unlock();
r = cond.wait();
- client_lock.Lock();
+ client_lock.lock();
} while (r == -EAGAIN);
if (r < 0) {
monclient->renew_subs();
wait_on_list(waiting_for_fsmap);
}
- assert(fsmap_user);
- assert(fsmap_user->get_epoch() >= fsmap_latest);
+ ceph_assert(fsmap_user);
+ ceph_assert(fsmap_user->get_epoch() >= fsmap_latest);
} else {
if (!fsmap || fsmap->get_epoch() < fsmap_latest) {
monclient->sub_want("fsmap", fsmap_latest, CEPH_SUBSCRIBE_ONETIME);
monclient->renew_subs();
wait_on_list(waiting_for_fsmap);
}
- assert(fsmap);
- assert(fsmap->get_epoch() >= fsmap_latest);
+ ceph_assert(fsmap);
+ ceph_assert(fsmap->get_epoch() >= fsmap_latest);
}
ldout(cct, 10) << __func__ << " finished waiting for FSMap version "
<< fsmap_latest << dendl;
string *outs,
Context *onfinish)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (!initialized)
return -ENOTCONN;
const auto info = fsmap->get_info_gid(target_gid);
// Open a connection to the target MDS
- entity_inst_t inst = info.get_inst();
- ConnectionRef conn = messenger->get_connection(inst);
+ ConnectionRef conn = messenger->connect_to_mds(info.get_addrs());
// Generate MDSCommandOp state
auto &op = command_table.start_command();
<< " tid=" << op.tid << cmd << dendl;
// Construct and send MCommand
- MCommand *m = op.get_message(monclient->get_fsid());
- conn->send_message(m);
+ auto m = op.get_message(monclient->get_fsid());
+ conn->send_message2(std::move(m));
}
gather.activate();
return 0;
}
-void Client::handle_command_reply(MCommandReply *m)
+void Client::handle_command_reply(const MConstRef<MCommandReply>& m)
{
ceph_tid_t const tid = m->get_tid();
if (!command_table.exists(tid)) {
ldout(cct, 1) << __func__ << ": unknown tid " << tid << ", dropping" << dendl;
- m->put();
return;
}
auto &op = command_table.get_command(tid);
if (op.outbl) {
- op.outbl->claim(m->get_data());
+ *op.outbl = m->get_data();
}
if (op.outs) {
*op.outs = m->rs;
}
command_table.erase(tid);
-
- m->put();
}
// -------------------
// MOUNT
-int Client::mount(const std::string &mount_root, const UserPerm& perms,
- bool require_mds)
+int Client::subscribe_mdsmap(const std::string &fs_name)
{
- Mutex::Locker lock(client_lock);
-
- if (mounted) {
- ldout(cct, 5) << "already mounted" << dendl;
- return 0;
- }
-
- unmounting = false;
-
int r = authenticate();
if (r < 0) {
lderr(cct) << "authentication failed: " << cpp_strerror(r) << dendl;
return r;
}
+ std::string resolved_fs_name;
+ if (fs_name.empty()) {
+ resolved_fs_name = cct->_conf.get_val<std::string>("client_fs");
+ if (resolved_fs_name.empty())
+ // Try the backwards-compatibility fs name option
+ resolved_fs_name = cct->_conf.get_val<std::string>("client_mds_namespace");
+ } else {
+ resolved_fs_name = fs_name;
+ }
+
std::string want = "mdsmap";
- const auto &mds_ns = cct->_conf->client_mds_namespace;
- if (!mds_ns.empty()) {
+ if (!resolved_fs_name.empty()) {
r = fetch_fsmap(true);
if (r < 0)
return r;
- fs_cluster_id_t cid = fsmap_user->get_fs_cid(mds_ns);
- if (cid == FS_CLUSTER_ID_NONE)
+ fscid = fsmap_user->get_fs_cid(resolved_fs_name);
+ if (fscid == FS_CLUSTER_ID_NONE) {
return -ENOENT;
+ }
std::ostringstream oss;
- oss << want << "." << cid;
+ oss << want << "." << fscid;
want = oss.str();
}
ldout(cct, 10) << "Subscribing to map '" << want << "'" << dendl;
monclient->sub_want(want, 0, 0);
monclient->renew_subs();
+ return 0;
+}
+
+int Client::mount(const std::string &mount_root, const UserPerm& perms,
+ bool require_mds, const std::string &fs_name)
+{
+ std::lock_guard lock(client_lock);
+
+ if (mounted) {
+ ldout(cct, 5) << "already mounted" << dendl;
+ return 0;
+ }
+
+ unmounting = false;
+
+ int r = subscribe_mdsmap(fs_name);
+ if (r < 0) {
+ lderr(cct) << "mdsmap subscription failed: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
tick(); // start tick
if (require_mds) {
break;
}
- assert(root);
+ ceph_assert(root);
_ll_get(root);
mounted = true;
void Client::_close_sessions()
{
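+ // sessions the MDS rejected will never acknowledge a close; prune
+ // them up front so the loop below does not wait on them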
+ for (auto it = mds_sessions.begin(); it != mds_sessions.end(); ) {
+ if (it->second.state == MetaSession::STATE_REJECTED)
+ mds_sessions.erase(it++);
+ else
+ ++it;
+ }
+
while (!mds_sessions.empty()) {
// send session closes!
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p) {
- if (p->second->state != MetaSession::STATE_CLOSING) {
- _close_mds_session(p->second);
+ for (auto &p : mds_sessions) {
+ if (p.second.state != MetaSession::STATE_CLOSING) {
+ _close_mds_session(&p.second);
+ mds_ranks_closing.insert(p.first);
}
}
// wait for sessions to close
- ldout(cct, 2) << "waiting for " << mds_sessions.size() << " mds sessions to close" << dendl;
- mount_cond.Wait(client_lock);
+ double timo = cct->_conf.get_val<std::chrono::seconds>("client_shutdown_timeout").count();
+ ldout(cct, 2) << "waiting for " << mds_ranks_closing.size() << " mds session(s) to close (timeout: "
+ << timo << "s)" << dendl;
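+ // adopt the client_lock we already hold so the condvar can drop it while
+ // waiting; it is handed back (not unlocked) via l.release() below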
+ std::unique_lock l{client_lock, std::adopt_lock};
+ if (!timo) {
+ mount_cond.wait(l);
+ } else if (!mount_cond.wait_for(l, ceph::make_timespan(timo), [this] { return mds_ranks_closing.empty(); })) {
+ ldout(cct, 1) << mds_ranks_closing.size() << " mds(s) did not respond to session close -- timing out." << dendl;
+ while (!mds_ranks_closing.empty()) {
+ auto& session = mds_sessions.at(*mds_ranks_closing.begin()); // reference, not a copy: we must close the real session
+ // this prunes entry from mds_sessions and mds_ranks_closing
+ _closed_mds_session(&session, -ETIMEDOUT);
+ }
+ }
+
+ mds_ranks_closing.clear();
+ l.release();
}
}
{
if (mds_requests.empty())
return;
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p) {
- MetaSession *s = p->second;
- flush_mdlog(s);
+ for (auto &p : mds_sessions) {
+ flush_mdlog(&p.second);
}
}
// will crash if they see an unknown CEPH_SESSION_* value in this msg.
const uint64_t features = session->con->get_features();
if (HAVE_FEATURE(features, SERVER_LUMINOUS)) {
- MClientSession *m = new MClientSession(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
- session->con->send_message(m);
+ auto m = make_message<MClientSession>(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
+ session->con->send_message2(std::move(m));
}
}
-void Client::_unmount()
+void Client::_abort_mds_sessions(int err)
+{
+ for (auto p = mds_requests.begin(); p != mds_requests.end(); ) {
+ auto req = p->second;
+ ++p;
+ // unsafe requests will be removed during close session below.
+ if (req->got_unsafe)
+ continue;
+
+ req->abort(err);
+ if (req->caller_cond) {
+ req->kick = true;
+ req->caller_cond->notify_all();
+ }
+ }
+
+ // Process aborts on any requests that were on this waitlist.
+ // Any requests that were on a waiting_for_open session waitlist
+ // will get kicked during close session below.
+ signal_cond_list(waiting_for_mdsmap);
+
+ // Force-close all sessions
+ while(!mds_sessions.empty()) {
+ auto& session = mds_sessions.begin()->second;
+ _closed_mds_session(&session, err);
+ }
+}
+
+void Client::_unmount(bool abort)
{
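+ // the caller holds client_lock; adopt it so the waits below can release
+ // it, then hand ownership back via lock.release() before returning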
+ std::unique_lock lock{client_lock, std::adopt_lock};
if (unmounting)
return;
- ldout(cct, 2) << "unmounting" << dendl;
+ if (abort || blacklisted) {
+ ldout(cct, 2) << "unmounting (" << (abort ? "abort)" : "blacklisted)") << dendl;
+ } else {
+ ldout(cct, 2) << "unmounting" << dendl;
+ }
unmounting = true;
deleg_timeout = 0;
- flush_mdlog_sync(); // flush the mdlog for pending requests, if any
- while (!mds_requests.empty()) {
- ldout(cct, 10) << "waiting on " << mds_requests.size() << " requests" << dendl;
- mount_cond.Wait(client_lock);
+ if (abort) {
+ // Abort all mds sessions
+ _abort_mds_sessions(-ENOTCONN);
+
+ objecter->op_cancel_writes(-ENOTCONN);
+ } else {
+ // flush the mdlog for pending requests, if any
+ flush_mdlog_sync();
}
+ mount_cond.wait(lock, [this] {
+ if (!mds_requests.empty()) {
+ ldout(cct, 10) << "waiting on " << mds_requests.size() << " requests"
+ << dendl;
+ }
+ return mds_requests.empty();
+ });
if (tick_event)
timer.cancel_event(tick_event);
tick_event = 0;
_ll_drop_pins();
- if (blacklisted) {
- ldout(cct, 0) << " skipping clean shutdown, we are blacklisted" << dendl;
-
- if (cct->_conf->client_oc) {
- // Purge all cached data so that ObjectCacher doesn't get hung up
- // trying to flush it. ObjectCacher's behaviour on EBLACKLISTED
- // is to just leave things marked dirty
- // (http://tracker.ceph.com/issues/9105)
- for (const auto &i : inode_map) {
- objectcacher->purge_set(&(i.second->oset));
- }
+ mount_cond.wait(lock, [this] {
+ if (unsafe_sync_write > 0) {
+ ldout(cct, 0) << unsafe_sync_write << " unsafe_sync_writes, waiting"
+ << dendl;
}
-
- mounted = false;
- return;
- }
-
- while (unsafe_sync_write > 0) {
- ldout(cct, 0) << unsafe_sync_write << " unsafe_sync_writes, waiting" << dendl;
- mount_cond.Wait(client_lock);
- }
+ return unsafe_sync_write <= 0;
+ });
if (cct->_conf->client_oc) {
// flush/release all buffered data
- ceph::unordered_map<vinodeno_t, Inode*>::iterator next;
- for (ceph::unordered_map<vinodeno_t, Inode*>::iterator p = inode_map.begin();
- p != inode_map.end();
- p = next) {
- next = p;
- ++next;
- Inode *in = p->second;
+ std::list<InodeRef> anchor;
+ for (auto& p : inode_map) {
+ Inode *in = p.second;
if (!in) {
- ldout(cct, 0) << "null inode_map entry ino " << p->first << dendl;
- assert(in);
+ ldout(cct, 0) << "null inode_map entry ino " << p.first << dendl;
+ ceph_assert(in);
}
- if (!in->caps.empty()) {
- InodeRef tmp_ref(in);
+
+ // prevent inode from getting freed
+ anchor.emplace_back(in);
+
+ if (abort || blacklisted) {
+ objectcacher->purge_set(&in->oset);
+ } else if (!in->caps.empty()) {
_release(in);
_flush(in, new C_Client_FlushComplete(this, in));
}
}
}
- flush_caps_sync();
- wait_sync_caps(last_flush_tid);
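+ // when aborting or blacklisted, flushing would block or be rejected,
+ // so drop the dirty caps instead of syncing them out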
+ if (abort || blacklisted) {
+ for (auto p = dirty_list.begin(); !p.end(); ) {
+ Inode *in = *p;
+ ++p;
+ if (in->dirty_caps) {
+ ldout(cct, 0) << " drop dirty caps on " << *in << dendl;
+ in->mark_caps_clean();
+ put_inode(in);
+ }
+ }
+ } else {
+ flush_caps_sync();
+ wait_sync_caps(last_flush_tid);
+ }
// empty lru cache
trim_cache();
<< "+" << inode_map.size() << " items"
<< ", waiting (for caps to release?)"
<< dendl;
- utime_t until = ceph_clock_now() + utime_t(5, 0);
- int r = mount_cond.WaitUntil(client_lock, until);
- if (r == ETIMEDOUT) {
+ if (auto r = mount_cond.wait_for(lock, ceph::make_timespan(5));
+ r == std::cv_status::timeout) {
dump_cache(NULL);
}
}
- assert(lru.lru_get_size() == 0);
- assert(inode_map.empty());
+ ceph_assert(lru.lru_get_size() == 0);
+ ceph_assert(inode_map.empty());
// stop tracing
if (!cct->_conf->client_trace.empty()) {
mounted = false;
+ lock.release();
ldout(cct, 2) << "unmounted." << dendl;
}
void Client::unmount()
{
- Mutex::Locker lock(client_lock);
- _unmount();
+ std::lock_guard lock(client_lock);
+ _unmount(false);
+}
+
+void Client::abort_conn()
+{
+ std::lock_guard lock(client_lock);
+ _unmount(true);
}
void Client::flush_cap_releases()
{
// send any cap releases
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p) {
- if (p->second->release && mdsmap->is_clientreplay_or_active_or_stopping(
- p->first)) {
+ for (auto &p : mds_sessions) {
+ auto &session = p.second;
+ if (session.release && mdsmap->is_clientreplay_or_active_or_stopping(
+ p.first)) {
if (cct->_conf->client_inject_release_failure) {
ldout(cct, 20) << __func__ << " injecting failure to send cap release message" << dendl;
- p->second->release->put();
} else {
- p->second->con->send_message(p->second->release);
+ session.con->send_message2(std::move(session.release));
}
- p->second->release = 0;
+ session.release.reset();
}
}
}
{
if (cct->_conf->client_debug_inject_tick_delay > 0) {
sleep(cct->_conf->client_debug_inject_tick_delay);
- assert(0 == cct->_conf->set_val("client_debug_inject_tick_delay", "0"));
- cct->_conf->apply_changes(NULL);
+ ceph_assert(0 == cct->_conf.set_val("client_debug_inject_tick_delay", "0"));
+ cct->_conf.apply_changes(nullptr);
}
ldout(cct, 21) << "tick" << dendl;
tick_event = timer.add_event_after(
cct->_conf->client_tick_interval,
- new FunctionContext([this](int) {
+ new LambdaContext([this](int) {
// Called back via Timer, which takes client_lock for us
- assert(client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
tick();
}));
utime_t now = ceph_clock_now();
req->abort(-ETIMEDOUT);
if (req->caller_cond) {
req->kick = true;
- req->caller_cond->Signal();
+ req->caller_cond->notify_all();
}
signal_cond_list(waiting_for_mdsmap);
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p)
- signal_context_list(p->second->waiting_for_open);
+ for (auto &p : mds_sessions) {
+ signal_context_list(p.second.waiting_for_open);
+ }
}
}
}
trim_cache(true);
+
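+ // if we were blacklisted, optionally attempt an automatic reconnect,
+ // rate-limited to once every 30 minutes (30 * 60 seconds)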
+ if (blacklisted && mounted &&
+ last_auto_reconnect + 30 * 60 < now &&
+ cct->_conf.get_val<bool>("client_reconnect_stale")) {
+ messenger->client_reset();
+ fd_gen++; // invalidate open files
+ blacklisted = false;
+ _kick_stale_sessions();
+ last_auto_reconnect = now;
+ }
}
void Client::renew_caps()
ldout(cct, 10) << "renew_caps()" << dendl;
last_cap_renew = ceph_clock_now();
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p) {
- ldout(cct, 15) << "renew_caps requesting from mds." << p->first << dendl;
- if (mdsmap->get_state(p->first) >= MDSMap::STATE_REJOIN)
- renew_caps(p->second);
+ for (auto &p : mds_sessions) {
+ ldout(cct, 15) << "renew_caps requesting from mds." << p.first << dendl;
+ if (mdsmap->get_state(p.first) >= MDSMap::STATE_REJOIN)
+ renew_caps(&p.second);
}
}
ldout(cct, 10) << "renew_caps mds." << session->mds_num << dendl;
session->last_cap_renew_request = ceph_clock_now();
uint64_t seq = ++session->cap_renew_seq;
- session->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
+ session->con->send_message2(make_message<MClientSession>(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
}
mask |= DEBUG_GETATTR_CAPS;
req->head.args.getattr.mask = mask;
- ldout(cct, 10) << "_do_lookup on " << path << dendl;
+ ldout(cct, 10) << __func__ << " on " << path << dendl;
int r = make_request(req, perms, target);
- ldout(cct, 10) << "_do_lookup res is " << r << dendl;
+ ldout(cct, 10) << __func__ << " res is " << r << dendl;
return r;
}
{
int r = 0;
Dentry *dn = NULL;
-
- if (!dir->is_dir()) {
- r = -ENOTDIR;
- goto done;
- }
+ // can only request shared caps
+ mask &= CEPH_CAP_ANY_SHARED | CEPH_STAT_RSTAT;
if (dname == "..") {
- if (dir->dn_set.empty())
- *target = dir;
+ if (dir->dentries.empty()) {
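+ // no dentry to walk up through in memory; ask a (randomly chosen)
+ // active mds for the parent via LOOKUPPARENT instead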
+ MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPPARENT);
+ filepath path(dir->ino);
+ req->set_filepath(path);
+
+ InodeRef tmptarget;
+ int r = make_request(req, perms, &tmptarget, NULL, rand() % mdsmap->get_num_in_mds());
+
+ if (r == 0) {
+ *target = std::move(tmptarget);
+ ldout(cct, 8) << __func__ << " found target " << (*target)->ino << dendl;
+ } else {
+ *target = dir;
+ }
+ }
else
*target = dir->get_first_parent()->dir->parent_inode; //dirs can't be hard-linked
goto done;
goto done;
}
+ if (!dir->is_dir()) {
+ r = -ENOTDIR;
+ goto done;
+ }
+
if (dname.length() > NAME_MAX) {
r = -ENAMETOOLONG;
goto done;
dir->dir->dentries.count(dname)) {
dn = dir->dir->dentries[dname];
- ldout(cct, 20) << "_lookup have dn " << dname << " mds." << dn->lease_mds << " ttl " << dn->lease_ttl
+ ldout(cct, 20) << __func__ << " have dn " << dname << " mds." << dn->lease_mds << " ttl " << dn->lease_ttl
<< " seq " << dn->lease_seq
<< dendl;
if (dn->lease_mds >= 0 &&
dn->lease_ttl > now &&
mds_sessions.count(dn->lease_mds)) {
- MetaSession *s = mds_sessions[dn->lease_mds];
- if (s->cap_ttl > now &&
- s->cap_gen == dn->lease_gen) {
+ MetaSession &s = mds_sessions.at(dn->lease_mds);
+ if (s.cap_ttl > now &&
+ s.cap_gen == dn->lease_gen) {
// touch this mds's dir cap too, even though we don't _explicitly_ use it here, to
// make trim_caps() behave.
dir->try_touch_cap(dn->lease_mds);
goto hit_dn;
}
- ldout(cct, 20) << " bad lease, cap_ttl " << s->cap_ttl << ", cap_gen " << s->cap_gen
+ ldout(cct, 20) << " bad lease, cap_ttl " << s.cap_ttl << ", cap_gen " << s.cap_gen
<< " vs lease_gen " << dn->lease_gen << dendl;
}
- // dir lease?
+ // dir shared caps?
if (dir->caps_issued_mask(CEPH_CAP_FILE_SHARED, true)) {
if (dn->cap_shared_gen == dir->shared_gen &&
(!dn->inode || dn->inode->caps_issued_mask(mask, true)))
goto hit_dn;
if (!dn->inode && (dir->flags & I_COMPLETE)) {
- ldout(cct, 10) << "_lookup concluded ENOENT locally for "
+ ldout(cct, 10) << __func__ << " concluded ENOENT locally for "
<< *dir << " dn '" << dname << "'" << dendl;
return -ENOENT;
}
// can we conclude ENOENT locally?
if (dir->caps_issued_mask(CEPH_CAP_FILE_SHARED, true) &&
(dir->flags & I_COMPLETE)) {
- ldout(cct, 10) << "_lookup concluded ENOENT locally for " << *dir << " dn '" << dname << "'" << dendl;
+ ldout(cct, 10) << __func__ << " concluded ENOENT locally for " << *dir << " dn '" << dname << "'" << dendl;
return -ENOENT;
}
}
done:
if (r < 0)
- ldout(cct, 10) << "_lookup " << *dir << " " << dname << " = " << r << dendl;
+ ldout(cct, 10) << __func__ << " " << *dir << " " << dname << " = " << r << dendl;
else
- ldout(cct, 10) << "_lookup " << *dir << " " << dname << " = " << **target << dendl;
+ ldout(cct, 10) << __func__ << " " << *dir << " " << dname << " = " << **target << dendl;
return r;
}
Dentry **pdn, bool expect_null)
{
// lookup
- ldout(cct, 20) << "get_or_create " << *dir << " name " << name << dendl;
+ ldout(cct, 20) << __func__ << " " << *dir << " name " << name << dendl;
dir->open_dir();
if (dir->dir->dentries.count(name)) {
Dentry *dn = dir->dir->dentries[name];
dn->lease_mds >= 0 &&
dn->lease_ttl > now &&
mds_sessions.count(dn->lease_mds)) {
- MetaSession *s = mds_sessions[dn->lease_mds];
- if (s->cap_ttl > now &&
- s->cap_gen == dn->lease_gen) {
+ MetaSession &s = mds_sessions.at(dn->lease_mds);
+ if (s.cap_ttl > now &&
+ s.cap_gen == dn->lease_gen) {
if (expect_null)
return -EEXIST;
}
cur = root;
else
cur = cwd;
- assert(cur);
+ ceph_assert(cur);
- ldout(cct, 10) << "path_walk " << path << dendl;
+ ldout(cct, 10) << __func__ << " " << path << dendl;
int symlinks = 0;
int Client::link(const char *relexisting, const char *relpath, const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "link" << std::endl;
tout(cct) << relexisting << std::endl;
tout(cct) << relpath << std::endl;
int Client::unlink(const char *relpath, const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "unlink" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
if (unmounting)
int Client::rename(const char *relfrom, const char *relto, const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "rename" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relfrom << std::endl;
tout(cct) << relto << std::endl;
int Client::mkdir(const char *relpath, mode_t mode, const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "mkdir" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << mode << std::endl;
- ldout(cct, 10) << "mkdir: " << relpath << dendl;
+ ldout(cct, 10) << __func__ << ": " << relpath << dendl;
if (unmounting)
return -ENOTCONN;
int Client::mkdirs(const char *relpath, mode_t mode, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 10) << "Client::mkdirs " << relpath << dendl;
- tout(cct) << "mkdirs" << std::endl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << mode << std::endl;
break;
cur.swap(next);
}
- //check that we have work left to do
- if (i==path.depth()) return -EEXIST;
if (r!=-ENOENT) return r;
- ldout(cct, 20) << "mkdirs got through " << i << " directories on path " << relpath << dendl;
+ ldout(cct, 20) << __func__ << " got through " << i << " directories on path " << relpath << dendl;
//make new directory at each level
for (; i<path.depth(); ++i) {
if (cct->_conf->client_permissions) {
return r;
//move to new dir and continue
cur.swap(next);
- ldout(cct, 20) << "mkdirs: successfully created directory "
+ ldout(cct, 20) << __func__ << ": successfully created directory "
<< filepath(cur->ino).get_path() << dendl;
}
return 0;
int Client::rmdir(const char *relpath, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "rmdir" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
if (unmounting)
int Client::mknod(const char *relpath, mode_t mode, const UserPerm& perms, dev_t rdev)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "mknod" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << mode << std::endl;
tout(cct) << rdev << std::endl;
int Client::symlink(const char *target, const char *relpath, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "symlink" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << target << std::endl;
tout(cct) << relpath << std::endl;
int Client::readlink(const char *relpath, char *buf, loff_t size, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "readlink" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
if (unmounting)
{
bool yes = in->caps_issued_mask(mask, true);
- ldout(cct, 10) << "_getattr mask " << ccap_string(mask) << " issued=" << yes << dendl;
+ ldout(cct, 10) << __func__ << " mask " << ccap_string(mask) << " issued=" << yes << dendl;
if (yes && !force)
return 0;
req->head.args.getattr.mask = mask;
int res = make_request(req, perms);
- ldout(cct, 10) << "_getattr result=" << res << dendl;
+ ldout(cct, 10) << __func__ << " result=" << res << dendl;
return res;
}
{
int issued = in->caps_issued();
- ldout(cct, 10) << "_setattr mask " << mask << " issued " <<
+ ldout(cct, 10) << __func__ << " mask " << mask << " issued " <<
ccap_string(issued) << dendl;
if (in->snapid != CEPH_NOSNAP) {
stx->stx_mode = st->st_mode;
stx->stx_uid = st->st_uid;
stx->stx_gid = st->st_gid;
+#ifdef __APPLE__
+ stx->stx_mtime = st->st_mtimespec;
+ stx->stx_atime = st->st_atimespec;
+#else
stx->stx_mtime = st->st_mtim;
stx->stx_atime = st->st_atim;
+#endif
}
int Client::__setattrx(Inode *in, struct ceph_statx *stx, int mask,
int Client::setattr(const char *relpath, struct stat *attr, int mask,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "setattr" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << mask << std::endl;
int Client::setattrx(const char *relpath, struct ceph_statx *stx, int mask,
const UserPerm& perms, int flags)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "setattrx" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << mask << std::endl;
int Client::fsetattr(int fd, struct stat *attr, int mask, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "fsetattr" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << mask << std::endl;
int Client::fsetattrx(int fd, struct ceph_statx *stx, int mask, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "fsetattr" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << mask << std::endl;
int Client::stat(const char *relpath, struct stat *stbuf, const UserPerm& perms,
frag_info_t *dirstat, int mask)
{
- ldout(cct, 3) << "stat enter (relpath " << relpath << " mask " << mask << ")" << dendl;
- Mutex::Locker lock(client_lock);
+ ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " mask " << mask << ")" << dendl;
+ std::lock_guard lock(client_lock);
tout(cct) << "stat" << std::endl;
tout(cct) << relpath << std::endl;
return r;
r = _getattr(in, mask, perms);
if (r < 0) {
- ldout(cct, 3) << "stat exit on error!" << dendl;
+ ldout(cct, 3) << __func__ << " exit on error!" << dendl;
return r;
}
fill_stat(in, stbuf, dirstat);
- ldout(cct, 3) << "stat exit (relpath " << relpath << " mask " << mask << ")" << dendl;
+ ldout(cct, 3) << __func__ << " exit (relpath " << relpath << " mask " << mask << ")" << dendl;
return r;
}
mask |= CEPH_CAP_AUTH_SHARED;
if (want & (CEPH_STATX_NLINK|CEPH_STATX_CTIME|CEPH_STATX_VERSION))
mask |= CEPH_CAP_LINK_SHARED;
- if (want & (CEPH_STATX_ATIME|CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_SIZE|CEPH_STATX_BLOCKS|CEPH_STATX_VERSION))
+ if (want & (CEPH_STATX_NLINK|CEPH_STATX_ATIME|CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_SIZE|CEPH_STATX_BLOCKS|CEPH_STATX_VERSION))
mask |= CEPH_CAP_FILE_SHARED;
if (want & (CEPH_STATX_VERSION|CEPH_STATX_CTIME))
mask |= CEPH_CAP_XATTR_SHARED;
const UserPerm& perms,
unsigned int want, unsigned int flags)
{
- ldout(cct, 3) << "statx enter (relpath " << relpath << " want " << want << ")" << dendl;
- Mutex::Locker lock(client_lock);
+ ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " want " << want << ")" << dendl;
+ std::lock_guard lock(client_lock);
tout(cct) << "statx" << std::endl;
tout(cct) << relpath << std::endl;
r = _getattr(in, mask, perms);
if (r < 0) {
- ldout(cct, 3) << "statx exit on error!" << dendl;
+ ldout(cct, 3) << __func__ << " exit on error!" << dendl;
return r;
}
fill_statx(in, mask, stx);
- ldout(cct, 3) << "statx exit (relpath " << relpath << " mask " << stx->stx_mask << ")" << dendl;
+ ldout(cct, 3) << __func__ << " exit (relpath " << relpath << " mask " << stx->stx_mask << ")" << dendl;
return r;
}
int Client::lstat(const char *relpath, struct stat *stbuf,
const UserPerm& perms, frag_info_t *dirstat, int mask)
{
- ldout(cct, 3) << "lstat enter (relpath " << relpath << " mask " << mask << ")" << dendl;
- Mutex::Locker lock(client_lock);
- tout(cct) << "lstat" << std::endl;
+ ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " mask " << mask << ")" << dendl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
if (unmounting)
return r;
r = _getattr(in, mask, perms);
if (r < 0) {
- ldout(cct, 3) << "lstat exit on error!" << dendl;
+ ldout(cct, 3) << __func__ << " exit on error!" << dendl;
return r;
}
fill_stat(in, stbuf, dirstat);
- ldout(cct, 3) << "lstat exit (relpath " << relpath << " mask " << mask << ")" << dendl;
+ ldout(cct, 3) << __func__ << " exit (relpath " << relpath << " mask " << mask << ")" << dendl;
return r;
}
int Client::fill_stat(Inode *in, struct stat *st, frag_info_t *dirstat, nest_info_t *rstat)
{
- ldout(cct, 10) << "fill_stat on " << in->ino << " snap/dev" << in->snapid
+ ldout(cct, 10) << __func__ << " on " << in->ino << " snap/dev" << in->snapid
<< " mode 0" << oct << in->mode << dec
<< " mtime " << in->mtime << " ctime " << in->ctime << dendl;
memset(st, 0, sizeof(struct stat));
st->st_size = in->size;
st->st_blocks = (in->size + 511) >> 9;
}
- st->st_blksize = MAX(in->layout.stripe_unit, 4096);
+ st->st_blksize = std::max<uint32_t>(in->layout.stripe_unit, 4096);
if (dirstat)
*dirstat = in->dirstat;
void Client::fill_statx(Inode *in, unsigned int mask, struct ceph_statx *stx)
{
- ldout(cct, 10) << "fill_statx on " << in->ino << " snap/dev" << in->snapid
+ ldout(cct, 10) << __func__ << " on " << in->ino << " snap/dev" << in->snapid
<< " mode 0" << oct << in->mode << dec
<< " mtime " << in->mtime << " ctime " << in->ctime << dendl;
memset(stx, 0, sizeof(struct ceph_statx));
/* These are always considered to be available */
stx->stx_dev = in->snapid;
- stx->stx_blksize = MAX(in->layout.stripe_unit, 4096);
+ stx->stx_blksize = std::max<uint32_t>(in->layout.stripe_unit, 4096);
/* Type bits are always set, even when CEPH_STATX_MODE is not */
stx->stx_mode = S_IFMT & in->mode;
int Client::chmod(const char *relpath, mode_t mode, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "chmod" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << mode << std::endl;
int Client::fchmod(int fd, mode_t mode, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "fchmod" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << mode << std::endl;
int Client::lchmod(const char *relpath, mode_t mode, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "lchmod" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << mode << std::endl;
int Client::chown(const char *relpath, uid_t new_uid, gid_t new_gid,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "chown" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << new_uid << std::endl;
tout(cct) << new_gid << std::endl;
int Client::fchown(int fd, uid_t new_uid, gid_t new_gid, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "fchown" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << new_uid << std::endl;
tout(cct) << new_gid << std::endl;
int Client::lchown(const char *relpath, uid_t new_uid, gid_t new_gid,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "lchown" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
tout(cct) << new_uid << std::endl;
tout(cct) << new_gid << std::endl;
return _setattr(in, &attr, mask, perms);
}
+static void attr_set_atime_and_mtime(struct stat *attr,
+ const utime_t &atime,
+ const utime_t &mtime)
+{
+ stat_set_atime_sec(attr, atime.tv.tv_sec);
+ stat_set_atime_nsec(attr, atime.tv.tv_nsec);
+ stat_set_mtime_sec(attr, mtime.tv.tv_sec);
+ stat_set_mtime_nsec(attr, mtime.tv.tv_nsec);
+}
+
+// For [l]utime(), invoke the timeval variants, since the timespec
+// variants are not yet implemented. For futime[s](), invoke the
+// timespec variant.
int Client::utime(const char *relpath, struct utimbuf *buf,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "utime" << std::endl;
+ struct timeval tv[2];
+ tv[0].tv_sec = buf->actime;
+ tv[0].tv_usec = 0;
+ tv[1].tv_sec = buf->modtime;
+ tv[1].tv_usec = 0;
+
+ return utimes(relpath, tv, perms);
+}
+
+int Client::lutime(const char *relpath, struct utimbuf *buf,
+ const UserPerm& perms)
+{
+ struct timeval tv[2];
+ tv[0].tv_sec = buf->actime;
+ tv[0].tv_usec = 0;
+ tv[1].tv_sec = buf->modtime;
+ tv[1].tv_usec = 0;
+
+ return lutimes(relpath, tv, perms);
+}
+
+int Client::futime(int fd, struct utimbuf *buf, const UserPerm& perms)
+{
+ struct timespec ts[2];
+ ts[0].tv_sec = buf->actime;
+ ts[0].tv_nsec = 0;
+ ts[1].tv_sec = buf->modtime;
+ ts[1].tv_nsec = 0;
+
+ return futimens(fd, ts, perms);
+}
+
+int Client::utimes(const char *relpath, struct timeval times[2],
+ const UserPerm& perms)
+{
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
- tout(cct) << buf->modtime << std::endl;
- tout(cct) << buf->actime << std::endl;
+ tout(cct) << "atime: " << times[0].tv_sec << "." << times[0].tv_usec
+ << std::endl;
+ tout(cct) << "mtime: " << times[1].tv_sec << "." << times[1].tv_usec
+ << std::endl;
if (unmounting)
return -ENOTCONN;
if (r < 0)
return r;
struct stat attr;
- stat_set_mtime_sec(&attr, buf->modtime);
- stat_set_mtime_nsec(&attr, 0);
- stat_set_atime_sec(&attr, buf->actime);
- stat_set_atime_nsec(&attr, 0);
+ utime_t atime(times[0]);
+ utime_t mtime(times[1]);
+
+ attr_set_atime_and_mtime(&attr, atime, mtime);
return _setattr(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms);
}
-int Client::lutime(const char *relpath, struct utimbuf *buf,
- const UserPerm& perms)
+int Client::lutimes(const char *relpath, struct timeval times[2],
+ const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "lutime" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
- tout(cct) << buf->modtime << std::endl;
- tout(cct) << buf->actime << std::endl;
+ tout(cct) << "atime: " << times[0].tv_sec << "." << times[0].tv_usec
+ << std::endl;
+ tout(cct) << "mtime: " << times[1].tv_sec << "." << times[1].tv_usec
+ << std::endl;
if (unmounting)
return -ENOTCONN;
filepath path(relpath);
InodeRef in;
- // don't follow symlinks
int r = path_walk(path, &in, perms, false);
if (r < 0)
return r;
struct stat attr;
- stat_set_mtime_sec(&attr, buf->modtime);
- stat_set_mtime_nsec(&attr, 0);
- stat_set_atime_sec(&attr, buf->actime);
- stat_set_atime_nsec(&attr, 0);
- return _setattr(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms);
+ utime_t atime(times[0]);
+ utime_t mtime(times[1]);
+
+ attr_set_atime_and_mtime(&attr, atime, mtime);
+ return _setattr(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms);
+}
+
+int Client::futimes(int fd, struct timeval times[2], const UserPerm& perms)
+{
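+ // widen microseconds to nanoseconds and delegate to the timespec variant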
+ struct timespec ts[2];
+ ts[0].tv_sec = times[0].tv_sec;
+ ts[0].tv_nsec = times[0].tv_usec * 1000;
+ ts[1].tv_sec = times[1].tv_sec;
+ ts[1].tv_nsec = times[1].tv_usec * 1000;
+
+ return futimens(fd, ts, perms);
+}
+
+int Client::futimens(int fd, struct timespec times[2], const UserPerm& perms)
+{
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
+ tout(cct) << fd << std::endl;
+ tout(cct) << "atime: " << times[0].tv_sec << "." << times[0].tv_nsec
+ << std::endl;
+ tout(cct) << "mtime: " << times[1].tv_sec << "." << times[1].tv_nsec
+ << std::endl;
+
+ if (unmounting)
+ return -ENOTCONN;
+
+ Fh *f = get_filehandle(fd);
+ if (!f)
+ return -EBADF;
+#if defined(__linux__) && defined(O_PATH)
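+ // an fd opened with O_PATH cannot be used to change file metadata here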
+ if (f->flags & O_PATH)
+ return -EBADF;
+#endif
+ struct stat attr;
+ utime_t atime(times[0]);
+ utime_t mtime(times[1]);
+
+ attr_set_atime_and_mtime(&attr, atime, mtime);
+ return _setattr(f->inode, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms);
}
int Client::flock(int fd, int operation, uint64_t owner)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "flock" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << operation << std::endl;
tout(cct) << owner << std::endl;
int Client::opendir(const char *relpath, dir_result_t **dirpp, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "opendir" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << relpath << std::endl;
if (unmounting)
return -ENOTDIR;
*dirpp = new dir_result_t(in, perms);
opened_dirs.insert(*dirpp);
- ldout(cct, 8) << "_opendir(" << in->ino << ") = " << 0 << " (" << *dirpp << ")" << dendl;
+ ldout(cct, 8) << __func__ << "(" << in->ino << ") = " << 0 << " (" << *dirpp << ")" << dendl;
return 0;
}
int Client::closedir(dir_result_t *dir)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "closedir" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << (unsigned long)dir << std::endl;
- ldout(cct, 3) << "closedir(" << dir << ") = 0" << dendl;
+ ldout(cct, 3) << __func__ << "(" << dir << ") = 0" << dendl;
_closedir(dir);
return 0;
}
void Client::_closedir(dir_result_t *dirp)
{
- ldout(cct, 10) << "_closedir(" << dirp << ")" << dendl;
+ ldout(cct, 10) << __func__ << "(" << dirp << ")" << dendl;
if (dirp->inode) {
- ldout(cct, 10) << "_closedir detaching inode " << dirp->inode << dendl;
+ ldout(cct, 10) << __func__ << " detaching inode " << dirp->inode << dendl;
dirp->inode.reset();
}
_readdir_drop_dirp_buffer(dirp);
void Client::rewinddir(dir_result_t *dirp)
{
- Mutex::Locker lock(client_lock);
- ldout(cct, 3) << "rewinddir(" << dirp << ")" << dendl;
+ std::lock_guard lock(client_lock);
+ ldout(cct, 3) << __func__ << "(" << dirp << ")" << dendl;
if (unmounting)
return;
loff_t Client::telldir(dir_result_t *dirp)
{
dir_result_t *d = static_cast<dir_result_t*>(dirp);
- ldout(cct, 3) << "telldir(" << dirp << ") = " << d->offset << dendl;
+ ldout(cct, 3) << __func__ << "(" << dirp << ") = " << d->offset << dendl;
return d->offset;
}
void Client::seekdir(dir_result_t *dirp, loff_t offset)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
- ldout(cct, 3) << "seekdir(" << dirp << ", " << offset << ")" << dendl;
+ ldout(cct, 3) << __func__ << "(" << dirp << ", " << offset << ")" << dendl;
if (unmounting)
return;
de->d_name[255] = '\0';
#ifndef __CYGWIN__
de->d_ino = ino;
-#if !defined(DARWIN) && !defined(__FreeBSD__)
+#if !defined(__APPLE__) && !defined(__FreeBSD__)
de->d_off = next_off;
#endif
de->d_reclen = 1;
de->d_type = IFTODT(type);
- ldout(cct, 10) << "fill_dirent '" << de->d_name << "' -> " << inodeno_t(de->d_ino)
+ ldout(cct, 10) << __func__ << " '" << de->d_name << "' -> " << inodeno_t(de->d_ino)
<< " type " << (int)de->d_type << " w/ next_off " << hex << next_off << dec << dendl;
#endif
}
frag_t fg = dirp->buffer_frag;
if (fg.is_rightmost()) {
- ldout(cct, 10) << "_readdir_next_frag advance from " << fg << " to END" << dendl;
+ ldout(cct, 10) << __func__ << " advance from " << fg << " to END" << dendl;
dirp->set_end();
return;
}
// advance
fg = fg.next();
- ldout(cct, 10) << "_readdir_next_frag advance from " << dirp->buffer_frag << " to " << fg << dendl;
+ ldout(cct, 10) << __func__ << " advance from " << dirp->buffer_frag << " to " << fg << dendl;
if (dirp->hash_order()) {
// keep last_name
void Client::_readdir_rechoose_frag(dir_result_t *dirp)
{
- assert(dirp->inode);
+ ceph_assert(dirp->inode);
if (dirp->hash_order())
return;
frag_t cur = frag_t(dirp->offset_high());
frag_t fg = dirp->inode->dirfragtree[cur.value()];
if (fg != cur) {
- ldout(cct, 10) << "_readdir_rechoose_frag frag " << cur << " maps to " << fg << dendl;
+ ldout(cct, 10) << __func__ << " frag " << cur << " maps to " << fg << dendl;
dirp->offset = dir_result_t::make_fpos(fg, 2, false);
dirp->last_name.clear();
dirp->next_offset = 2;
void Client::_readdir_drop_dirp_buffer(dir_result_t *dirp)
{
- ldout(cct, 10) << "_readdir_drop_dirp_buffer " << dirp << dendl;
+ ldout(cct, 10) << __func__ << " " << dirp << dendl;
dirp->buffer.clear();
}
int Client::_readdir_get_frag(dir_result_t *dirp)
{
- assert(dirp);
- assert(dirp->inode);
+ ceph_assert(dirp);
+ ceph_assert(dirp->inode);
// get the current frag.
frag_t fg;
else
fg = frag_t(dirp->offset_high());
- ldout(cct, 10) << "_readdir_get_frag " << dirp << " on " << dirp->inode->ino << " fg " << fg
+ ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino << " fg " << fg
<< " offset " << hex << dirp->offset << dec << dendl;
int op = CEPH_MDS_OP_READDIR;
int res = make_request(req, dirp->perms, NULL, NULL, -1, &dirbl);
if (res == -EAGAIN) {
- ldout(cct, 10) << "_readdir_get_frag got EAGAIN, retrying" << dendl;
+ ldout(cct, 10) << __func__ << " got EAGAIN, retrying" << dendl;
_readdir_rechoose_frag(dirp);
return _readdir_get_frag(dirp);
}
if (res == 0) {
- ldout(cct, 10) << "_readdir_get_frag " << dirp << " got frag " << dirp->buffer_frag
+ ldout(cct, 10) << __func__ << " " << dirp << " got frag " << dirp->buffer_frag
<< " size " << dirp->buffer.size() << dendl;
} else {
- ldout(cct, 10) << "_readdir_get_frag got error " << res << ", setting end flag" << dendl;
+ ldout(cct, 10) << __func__ << " got error " << res << ", setting end flag" << dendl;
dirp->set_end();
}
int Client::_readdir_cache_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p,
int caps, bool getref)
{
- assert(client_lock.is_locked());
- ldout(cct, 10) << "_readdir_cache_cb " << dirp << " on " << dirp->inode->ino
+ ceph_assert(ceph_mutex_is_locked(client_lock));
+ ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino
<< " last_name " << dirp->last_name << " offset " << hex << dirp->offset << dec
<< dendl;
Dir *dir = dirp->inode->dir;
string dn_name;
while (true) {
+ int mask = caps;
if (!dirp->inode->is_complete_and_ordered())
return -EAGAIN;
if (pd == dir->readdir_cache.end())
continue;
}
- int r = _getattr(dn->inode, caps, dirp->perms);
+ int idx = pd - dir->readdir_cache.begin();
+ if (dn->inode->is_dir()) {
+ mask |= CEPH_STAT_RSTAT;
+ }
+ int r = _getattr(dn->inode, mask, dirp->perms);
if (r < 0)
return r;
+
+ // the content of readdir_cache may change after _getattr(), so pd may be invalid iterator
+ pd = dir->readdir_cache.begin() + idx;
+ if (pd >= dir->readdir_cache.end() || *pd != dn)
+ return -EAGAIN;
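+    // (Hedged note: a stale entry here is not fatal -- the -EAGAIN above is
+    // expected to make readdir_r_cb fall back to a fresh readdir from the
+    // MDS rather than trust the shifted cache.)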
struct ceph_statx stx;
struct dirent de;
fill_statx(dn->inode, caps, &stx);
uint64_t next_off = dn->offset + 1;
+ fill_dirent(&de, dn->name.c_str(), stx.stx_mode, stx.stx_ino, next_off);
++pd;
if (pd == dir->readdir_cache.end())
next_off = dir_result_t::END;
Inode *in = NULL;
- fill_dirent(&de, dn->name.c_str(), stx.stx_mode, stx.stx_ino, next_off);
if (getref) {
in = dn->inode.get();
_ll_get(in);
dn_name = dn->name; // fill in name while we have lock
- client_lock.Unlock();
+ client_lock.unlock();
r = cb(p, &de, &stx, next_off, in); // _next_ offset
- client_lock.Lock();
+ client_lock.lock();
ldout(cct, 15) << " de " << de.d_name << " off " << hex << dn->offset << dec
<< " = " << r << dendl;
if (r < 0) {
return r;
}
- ldout(cct, 10) << "_readdir_cache_cb " << dirp << " on " << dirp->inode->ino << " at end" << dendl;
+ ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino << " at end" << dendl;
dirp->set_end();
return 0;
}
{
int caps = statx_to_mask(flags, want);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
dir_result_t *dirp = static_cast<dir_result_t*>(d);
- ldout(cct, 10) << "readdir_r_cb " << *dirp->inode << " offset " << hex << dirp->offset
+ ldout(cct, 10) << __func__ << " " << *dirp->inode << " offset " << hex << dirp->offset
<< dec << " at_end=" << dirp->at_end()
<< " hash_order=" << dirp->hash_order() << dendl;
if (dirp->offset == 0) {
ldout(cct, 15) << " including ." << dendl;
- assert(diri->dn_set.size() < 2); // can't have multiple hard-links to a dir
+ ceph_assert(diri->dentries.size() < 2); // can't have multiple hard-links to a dir
uint64_t next_off = 1;
int r;
- r = _getattr(diri, caps, dirp->perms);
+ r = _getattr(diri, caps | CEPH_STAT_RSTAT, dirp->perms);
if (r < 0)
return r;
_ll_get(inode);
}
- client_lock.Unlock();
+ client_lock.unlock();
r = cb(p, &de, &stx, next_off, inode);
- client_lock.Lock();
+ client_lock.lock();
if (r < 0)
return r;
ldout(cct, 15) << " including .." << dendl;
uint64_t next_off = 2;
InodeRef in;
- if (diri->dn_set.empty())
+ if (diri->dentries.empty())
in = diri;
else
in = diri->get_first_parent()->dir->parent_inode;
int r;
- r = _getattr(in, caps, dirp->perms);
+ r = _getattr(in, caps | CEPH_STAT_RSTAT, dirp->perms);
if (r < 0)
return r;
_ll_get(inode);
}
- client_lock.Unlock();
+ client_lock.unlock();
r = cb(p, &de, &stx, next_off, inode);
- client_lock.Lock();
+ client_lock.lock();
if (r < 0)
return r;
int r;
if (check_caps) {
- r = _getattr(entry.inode, caps, dirp->perms);
+ int mask = caps;
+ if(entry.inode->is_dir()){
+ mask |= CEPH_STAT_RSTAT;
+ }
+ r = _getattr(entry.inode, mask, dirp->perms);
if (r < 0)
return r;
}
_ll_get(inode);
}
- client_lock.Unlock();
+ client_lock.unlock();
r = cb(p, &de, &stx, next_off, inode); // _next_ offset
- client_lock.Lock();
+ client_lock.lock();
ldout(cct, 15) << " de " << de.d_name << " off " << hex << next_off - 1 << dec
<< " = " << r << dendl;
if (diri->dir_ordered_count == dirp->ordered_count) {
ldout(cct, 10) << " marking (I_COMPLETE|I_DIR_ORDERED) on " << *diri << dendl;
if (diri->dir) {
- assert(diri->dir->readdir_cache.size() >= dirp->cache_index);
+ ceph_assert(diri->dir->readdir_cache.size() >= dirp->cache_index);
diri->dir->readdir_cache.resize(dirp->cache_index);
}
diri->flags |= I_COMPLETE | I_DIR_ORDERED;
struct dirent *Client::readdir(dir_result_t *d)
{
int ret;
- static struct dirent de;
+ auto& de = d->de;
single_readdir sr;
sr.de = &de;
sr.stx = NULL;
{
ldout(cct, 3) << "getdir(" << relpath << ")" << dendl;
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "getdir" << std::endl;
tout(cct) << relpath << std::endl;
}
mode_t mode, int stripe_unit, int stripe_count,
int object_size, const char *data_pool)
{
- ldout(cct, 3) << "open enter(" << relpath << ", " << ceph_flags_sys2wire(flags) << "," << mode << ")" << dendl;
- Mutex::Locker lock(client_lock);
+ int cflags = ceph_flags_sys2wire(flags);
+
+ ldout(cct, 3) << "open enter(" << relpath << ", " << cflags << "," << mode << ")" << dendl;
+ std::lock_guard lock(client_lock);
tout(cct) << "open" << std::endl;
tout(cct) << relpath << std::endl;
- tout(cct) << ceph_flags_sys2wire(flags) << std::endl;
+ tout(cct) << cflags << std::endl;
if (unmounting)
return -ENOTCONN;
bool created = false;
/* O_CREATE with O_EXCL enforces O_NOFOLLOW. */
bool followsym = !((flags & O_NOFOLLOW) || ((flags & O_CREAT) && (flags & O_EXCL)));
- int r = path_walk(path, &in, perms, followsym, ceph_caps_for_mode(mode));
+ int mask = ceph_caps_for_mode(ceph_flags_to_mode(cflags));
+
+ int r = path_walk(path, &in, perms, followsym, mask);
if (r == 0 && (flags & O_CREAT) && (flags & O_EXCL))
return -EEXIST;
r = _open(in.get(), flags, mode, &fh, perms);
if (r >= 0) {
// allocate a integer file descriptor
- assert(fh);
+ ceph_assert(fh);
r = get_fd();
- assert(fd_map.count(r) == 0);
+ ceph_assert(fd_map.count(r) == 0);
fd_map[r] = fh;
}
out:
tout(cct) << r << std::endl;
- ldout(cct, 3) << "open exit(" << path << ", " << ceph_flags_sys2wire(flags) << ") = " << r << dendl;
+ ldout(cct, 3) << "open exit(" << path << ", " << cflags << ") = " << r << dendl;
return r;
}
int Client::lookup_hash(inodeno_t ino, inodeno_t dirino, const char *name,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- ldout(cct, 3) << "lookup_hash enter(" << ino << ", #" << dirino << "/" << name << ")" << dendl;
+ std::lock_guard lock(client_lock);
+ ldout(cct, 3) << __func__ << " enter(" << ino << ", #" << dirino << "/" << name << ")" << dendl;
if (unmounting)
return -ENOTCONN;
int r = make_request(req, perms, NULL, NULL,
rand() % mdsmap->get_num_in_mds());
- ldout(cct, 3) << "lookup_hash exit(" << ino << ", #" << dirino << "/" << name << ") = " << r << dendl;
+ ldout(cct, 3) << __func__ << " exit(" << ino << ", #" << dirino << "/" << name << ") = " << r << dendl;
return r;
}
* the resulting Inode object in one operation, so that caller
* can safely assume inode will still be there after return.
*/
-int Client::_lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode)
+int Client::_lookup_vino(vinodeno_t vino, const UserPerm& perms, Inode **inode)
{
- ldout(cct, 8) << "lookup_ino enter(" << ino << ")" << dendl;
+ ldout(cct, 8) << __func__ << " enter(" << vino << ")" << dendl;
if (unmounting)
return -ENOTCONN;
+ if (is_reserved_vino(vino))
+ return -ESTALE;
+
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPINO);
- filepath path(ino);
+ filepath path(vino.ino);
req->set_filepath(path);
+ /*
+ * The MDS expects either a "real" snapid here or 0. The special value
+ * carveouts for the snapid are all at the end of the range so we can
+ * just look for any snapid below this value.
+ */
+ if (vino.snapid < CEPH_NOSNAP)
+ req->head.args.lookupino.snapid = vino.snapid;
+
int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds());
if (r == 0 && inode != NULL) {
- vinodeno_t vino(ino, CEPH_NOSNAP);
unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
- assert(p != inode_map.end());
+ ceph_assert(p != inode_map.end());
*inode = p->second;
_ll_get(*inode);
}
- ldout(cct, 8) << "lookup_ino exit(" << ino << ") = " << r << dendl;
+ ldout(cct, 8) << __func__ << " exit(" << vino << ") = " << r << dendl;
return r;
}
int Client::lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode)
{
- Mutex::Locker lock(client_lock);
- return _lookup_ino(ino, perms, inode);
+ vinodeno_t vino(ino, CEPH_NOSNAP);
+ std::lock_guard lock(client_lock);
+ return _lookup_vino(vino, perms, inode);
}
/**
*/
int Client::_lookup_parent(Inode *ino, const UserPerm& perms, Inode **parent)
{
- ldout(cct, 8) << "lookup_parent enter(" << ino->ino << ")" << dendl;
-
- if (unmounting)
- return -ENOTCONN;
-
- if (!ino->dn_set.empty()) {
- // if we exposed the parent here, we'd need to check permissions,
- // but right now we just rely on the MDS doing so in make_request
- ldout(cct, 8) << "lookup_parent dentry already present" << dendl;
- return 0;
- }
-
- if (ino->is_root()) {
- *parent = NULL;
- ldout(cct, 8) << "ino is root, no parent" << dendl;
- return -EINVAL;
- }
+ ldout(cct, 8) << __func__ << " enter(" << ino->ino << ")" << dendl;
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPPARENT);
filepath path(ino->ino);
if (r == 0) {
*parent = target.get();
_ll_get(*parent);
- ldout(cct, 8) << "lookup_parent found parent " << (*parent)->ino << dendl;
+ ldout(cct, 8) << __func__ << " found parent " << (*parent)->ino << dendl;
} else {
*parent = NULL;
}
}
- ldout(cct, 8) << "lookup_parent exit(" << ino->ino << ") = " << r << dendl;
+ ldout(cct, 8) << __func__ << " exit(" << ino->ino << ") = " << r << dendl;
return r;
}
-int Client::lookup_parent(Inode *ino, const UserPerm& perms, Inode **parent)
-{
- Mutex::Locker lock(client_lock);
- return _lookup_parent(ino, perms, parent);
-}
-
/**
* Populate the parent dentry for `ino`, provided it is
* a child of `parent`.
*/
int Client::_lookup_name(Inode *ino, Inode *parent, const UserPerm& perms)
{
- assert(parent->is_dir());
- ldout(cct, 3) << "lookup_name enter(" << ino->ino << ")" << dendl;
+ ceph_assert(parent->is_dir());
+ ldout(cct, 3) << __func__ << " enter(" << ino->ino << ")" << dendl;
if (unmounting)
return -ENOTCONN;
req->set_inode(ino);
int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds());
- ldout(cct, 3) << "lookup_name exit(" << ino->ino << ") = " << r << dendl;
+ ldout(cct, 3) << __func__ << " exit(" << ino->ino << ") = " << r << dendl;
return r;
}
int Client::lookup_name(Inode *ino, Inode *parent, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
return _lookup_name(ino, parent, perms);
}
- Fh *Client::_create_fh(Inode *in, int flags, int cmode, const UserPerm& perms)
+Fh *Client::_create_fh(Inode *in, int flags, int cmode, const UserPerm& perms)
{
- assert(in);
- Fh *f = new Fh(in);
- f->mode = cmode;
- f->flags = flags;
+ ceph_assert(in);
+ Fh *f = new Fh(in, flags, cmode, fd_gen, perms);
- // inode
- f->actor_perms = perms;
-
- ldout(cct, 10) << "_create_fh " << in->ino << " mode " << cmode << dendl;
+ ldout(cct, 10) << __func__ << " " << in->ino << " mode " << cmode << dendl;
if (in->snapid != CEPH_NOSNAP) {
in->snap_cap_refs++;
<< ccap_string(in->caps_issued()) << dendl;
}
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
f->readahead.set_trigger_requests(1);
f->readahead.set_min_readahead_size(conf->client_readahead_min);
uint64_t max_readahead = Readahead::NO_LIMIT;
if (conf->client_readahead_max_bytes) {
- max_readahead = MIN(max_readahead, (uint64_t)conf->client_readahead_max_bytes);
+ max_readahead = std::min(max_readahead, (uint64_t)conf->client_readahead_max_bytes);
}
if (conf->client_readahead_max_periods) {
- max_readahead = MIN(max_readahead, in->layout.get_period()*(uint64_t)conf->client_readahead_max_periods);
+ max_readahead = std::min(max_readahead, in->layout.get_period()*(uint64_t)conf->client_readahead_max_periods);
}
f->readahead.set_max_readahead_size(max_readahead);
vector<uint64_t> alignments;
//ldout(cct, 3) << "op: client->close(open_files[ " << fh << " ]);" << dendl;
//ldout(cct, 3) << "op: open_files.erase( " << fh << " );" << dendl;
Inode *in = f->inode.get();
- ldout(cct, 8) << "_release_fh " << f << " mode " << f->mode << " on " << *in << dendl;
+ ldout(cct, 8) << __func__ << " " << f << " mode " << f->mode << " on " << *in << dendl;
in->unset_deleg(f);
check_caps(in, 0);
}
} else {
- assert(in->snap_cap_refs > 0);
+ ceph_assert(in->snap_cap_refs > 0);
in->snap_cap_refs--;
}
// Finally, read any async err (i.e. from flushes)
int err = f->take_async_err();
if (err != 0) {
- ldout(cct, 1) << "_release_fh " << f << " on inode " << *in << " caught async_err = "
+ ldout(cct, 1) << __func__ << " " << f << " on inode " << *in << " caught async_err = "
<< cpp_strerror(err) << dendl;
} else {
- ldout(cct, 10) << "_release_fh " << f << " on inode " << *in << " no async_err state" << dendl;
+ ldout(cct, 10) << __func__ << " " << f << " on inode " << *in << " no async_err state" << dendl;
}
_put_fh(f);
}
// use normalized flags to generate cmode
- int cmode = ceph_flags_to_mode(ceph_flags_sys2wire(flags));
- if (cmode < 0)
- return -EINVAL;
+ int cflags = ceph_flags_sys2wire(flags);
+ if (cct->_conf.get_val<bool>("client_force_lazyio"))
+ cflags |= CEPH_O_LAZY;
+
+ int cmode = ceph_flags_to_mode(cflags);
int want = ceph_caps_for_mode(cmode);
int result = 0;
filepath path;
in->make_nosnap_relative_path(path);
req->set_filepath(path);
- req->head.args.open.flags = ceph_flags_sys2wire(flags & ~O_CREAT);
+ req->head.args.open.flags = cflags & ~CEPH_O_CREAT;
req->head.args.open.mode = mode;
req->head.args.open.pool = -1;
if (cct->_conf->client_debug_getattr_caps)
if (cmode & CEPH_FILE_MODE_RD)
need |= CEPH_CAP_FILE_RD;
- result = get_caps(in, need, want, &have, -1);
+ Fh fh(in, flags, cmode, fd_gen, perms);
+ result = get_caps(&fh, need, want, &have, -1);
if (result < 0) {
ldout(cct, 8) << "Unable to get caps after open of inode " << *in <<
" . Denying open: " <<
cpp_strerror(result) << dendl;
- in->put_open_ref(cmode);
} else {
put_cap_ref(in, need);
}
int Client::close(int fd)
{
ldout(cct, 3) << "close enter(" << fd << ")" << dendl;
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "close" << std::endl;
tout(cct) << fd << std::endl;
loff_t Client::lseek(int fd, loff_t offset, int whence)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "lseek" << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << offset << std::endl;
loff_t Client::_lseek(Fh *f, loff_t offset, int whence)
{
Inode *in = f->inode.get();
- int r;
+ bool whence_check = false;
+ loff_t pos = -1;
+
+ switch (whence) {
+ case SEEK_END:
+ whence_check = true;
+ break;
+
+#ifdef SEEK_DATA
+ case SEEK_DATA:
+ whence_check = true;
+ break;
+#endif
+
+#ifdef SEEK_HOLE
+ case SEEK_HOLE:
+ whence_check = true;
+ break;
+#endif
+ }
+
+ if (whence_check) {
+ int r = _getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms);
+ if (r < 0)
+ return r;
+ }
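+  // SEEK_END, SEEK_DATA and SEEK_HOLE all compute pos against the current
+  // file size, so refresh it from the MDS first; SEEK_SET and SEEK_CUR need
+  // no cap refresh.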
switch (whence) {
case SEEK_SET:
- f->pos = offset;
+ pos = offset;
break;
case SEEK_CUR:
- f->pos += offset;
+ pos = f->pos + offset;
break;
case SEEK_END:
- r = _getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms);
- if (r < 0)
- return r;
- f->pos = in->size + offset;
+ pos = in->size + offset;
+ break;
+
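+  // Note: this client keeps no sub-file extent map, so the cases below treat
+  // every in-range offset as data (SEEK_DATA returns the offset itself) and
+  // report the only hole at EOF (SEEK_HOLE returns in->size).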
+#ifdef SEEK_DATA
+ case SEEK_DATA:
+ if (offset < 0 || static_cast<uint64_t>(offset) >= in->size)
+ return -ENXIO;
+ pos = offset;
break;
+#endif
+
+#ifdef SEEK_HOLE
+ case SEEK_HOLE:
+ if (offset < 0 || static_cast<uint64_t>(offset) >= in->size)
+ return -ENXIO;
+ pos = in->size;
+ break;
+#endif
default:
- ceph_abort();
+ ldout(cct, 1) << __func__ << ": invalid whence value " << whence << dendl;
+ return -EINVAL;
+ }
+
+ if (pos < 0) {
+ return -EINVAL;
+ } else {
+ f->pos = pos;
}
ldout(cct, 8) << "_lseek(" << f << ", " << offset << ", " << whence << ") = " << f->pos << dendl;
void Client::lock_fh_pos(Fh *f)
{
- ldout(cct, 10) << "lock_fh_pos " << f << dendl;
+ ldout(cct, 10) << __func__ << " " << f << dendl;
if (f->pos_locked || !f->pos_waiters.empty()) {
- Cond cond;
+ ceph::condition_variable cond;
f->pos_waiters.push_back(&cond);
- ldout(cct, 10) << "lock_fh_pos BLOCKING on " << f << dendl;
- while (f->pos_locked || f->pos_waiters.front() != &cond)
- cond.Wait(client_lock);
- ldout(cct, 10) << "lock_fh_pos UNBLOCKING on " << f << dendl;
- assert(f->pos_waiters.front() == &cond);
+ ldout(cct, 10) << __func__ << " BLOCKING on " << f << dendl;
+ std::unique_lock l{client_lock, std::adopt_lock};
+ cond.wait(l, [f, me=&cond] {
+ return !f->pos_locked && f->pos_waiters.front() == me;
+ });
+ l.release();
+ ldout(cct, 10) << __func__ << " UNBLOCKING on " << f << dendl;
+ ceph_assert(f->pos_waiters.front() == &cond);
f->pos_waiters.pop_front();
}
void Client::unlock_fh_pos(Fh *f)
{
- ldout(cct, 10) << "unlock_fh_pos " << f << dendl;
+ ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
+
+ ldout(cct, 10) << __func__ << " " << f << dendl;
f->pos_locked = false;
+ if (!f->pos_waiters.empty()) {
+ // only wake up the oldest waiter
+ auto cond = f->pos_waiters.front();
+ cond->notify_one();
+ }
}
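+
+// A sketch of the fh-pos lock protocol above: each waiter blocks until
+// pos_locked is clear *and* it sits at the front of pos_waiters, so waking
+// only the oldest waiter keeps the handoff strictly FIFO.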
int Client::uninline_data(Inode *in, Context *onfinish)
NULL);
bufferlist inline_version_bl;
- ::encode(in->inline_version, inline_version_bl);
+ encode(in->inline_version, inline_version_bl);
ObjectOperation uninline_ops;
uninline_ops.cmpxattr("inline_version",
int Client::read(int fd, char *buf, loff_t size, loff_t offset)
{
- Mutex::Locker lock(client_lock);
+ std::unique_lock lock(client_lock);
tout(cct) << "read" << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << size << std::endl;
return -EBADF;
#endif
bufferlist bl;
+ /* We can't return more bytes read than INT_MAX, so clamp size to that */
+ size = std::min(size, (loff_t)INT_MAX);
int r = _read(f, offset, size, &bl);
ldout(cct, 3) << "read(" << fd << ", " << (void*)buf << ", " << size << ", " << offset << ") = " << r << dendl;
if (r >= 0) {
- bl.copy(0, bl.length(), buf);
+ lock.unlock();
+ bl.begin().copy(bl.length(), buf);
r = bl.length();
}
return r;
return _preadv_pwritev(fd, iov, iovcnt, offset, false);
}
-int Client::_read(Fh *f, int64_t offset, uint64_t size, bufferlist *bl)
+int64_t Client::_read(Fh *f, int64_t offset, uint64_t size, bufferlist *bl)
{
- const md_config_t *conf = cct->_conf;
+ int want, have = 0;
+ bool movepos = false;
+ std::unique_ptr<C_SaferCond> onuninline;
+ int64_t rc = 0;
+ const auto& conf = cct->_conf;
Inode *in = f->inode.get();
+ utime_t lat;
+ utime_t start = ceph_clock_now();
if ((f->mode & CEPH_FILE_MODE_RD) == 0)
return -EBADF;
//bool lazy = f->mode == CEPH_FILE_MODE_LAZY;
- bool movepos = false;
if (offset < 0) {
lock_fh_pos(f);
offset = f->pos;
loff_t start_pos = offset;
if (in->inline_version == 0) {
- int r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true);
+ auto r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true);
if (r < 0) {
- if (movepos)
- unlock_fh_pos(f);
- return r;
+ rc = r;
+ goto done;
}
- assert(in->inline_version > 0);
+ ceph_assert(in->inline_version > 0);
}
retry:
- int have;
- int r = get_caps(in, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE, &have, -1);
- if (r < 0) {
- if (movepos)
- unlock_fh_pos(f);
- return r;
+ if (f->mode & CEPH_FILE_MODE_LAZY)
+ want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
+ else
+ want = CEPH_CAP_FILE_CACHE;
+ {
+ auto r = get_caps(f, CEPH_CAP_FILE_RD, want, &have, -1);
+ if (r < 0) {
+ rc = r;
+ goto done;
+ }
}
if (f->flags & O_DIRECT)
- have &= ~CEPH_CAP_FILE_CACHE;
-
- Mutex uninline_flock("Client::_read_uninline_data flock");
- Cond uninline_cond;
- bool uninline_done = false;
- int uninline_ret = 0;
- Context *onuninline = NULL;
+ have &= ~(CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO);
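+  // Inline-data sketch: with the cache cap held the inline blob is served
+  // straight from the inode below; without it the data is first uninlined
+  // out to RADOS and the read falls through to the normal object path.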
if (in->inline_version < CEPH_INLINE_NONE) {
if (!(have & CEPH_CAP_FILE_CACHE)) {
- onuninline = new C_SafeCond(&uninline_flock,
- &uninline_cond,
- &uninline_done,
- &uninline_ret);
- uninline_data(in, onuninline);
+ onuninline.reset(new C_SaferCond("Client::_read_uninline_data flock"));
+ uninline_data(in, onuninline.get());
} else {
uint32_t len = in->inline_data.length();
-
uint64_t endoff = offset + size;
if (endoff > in->size)
endoff = in->size;
bl->substr_of(in->inline_data, offset, len - offset);
bl->append_zero(endoff - len);
}
+ rc = endoff - offset;
} else if ((uint64_t)offset < endoff) {
bl->append_zero(endoff - offset);
+ rc = endoff - offset;
+ } else {
+ rc = 0;
}
-
goto success;
}
}
if (!conf->client_debug_force_sync_read &&
- (conf->client_oc && (have & CEPH_CAP_FILE_CACHE))) {
+ conf->client_oc &&
+ (have & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO))) {
if (f->flags & O_RSYNC) {
_flush_range(in, offset, size);
}
- r = _read_async(f, offset, size, bl);
- if (r < 0)
+ rc = _read_async(f, offset, size, bl);
+ if (rc < 0)
goto done;
} else {
if (f->flags & O_DIRECT)
_flush_range(in, offset, size);
bool checkeof = false;
- r = _read_sync(f, offset, size, bl, &checkeof);
- if (r < 0)
+ rc = _read_sync(f, offset, size, bl, &checkeof);
+ if (rc < 0)
goto done;
if (checkeof) {
- offset += r;
- size -= r;
+ offset += rc;
+ size -= rc;
put_cap_ref(in, CEPH_CAP_FILE_RD);
have = 0;
// reverify size
- r = _getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms);
- if (r < 0)
- goto done;
+ {
+ auto r = _getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms);
+ if (r < 0) {
+ rc = r;
+ goto done;
+ }
+ }
// eof? short read.
if ((uint64_t)offset < in->size)
}
success:
+ ceph_assert(rc >= 0);
if (movepos) {
// adjust fd pos
- f->pos = start_pos + bl->length();
- unlock_fh_pos(f);
+ f->pos = start_pos + rc;
}
+
+ lat = ceph_clock_now();
+ lat -= start;
+ logger->tinc(l_c_read, lat);
done:
// done!
-
+
if (onuninline) {
- client_lock.Unlock();
- uninline_flock.Lock();
- while (!uninline_done)
- uninline_cond.Wait(uninline_flock);
- uninline_flock.Unlock();
- client_lock.Lock();
-
- if (uninline_ret >= 0 || uninline_ret == -ECANCELED) {
+ client_lock.unlock();
+ int ret = onuninline->wait();
+ client_lock.lock();
+ if (ret >= 0 || ret == -ECANCELED) {
in->inline_data.clear();
in->inline_version = CEPH_INLINE_NONE;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
check_caps(in, 0);
} else
- r = uninline_ret;
+ rc = ret;
}
-
- if (have)
+ if (have) {
put_cap_ref(in, CEPH_CAP_FILE_RD);
- if (r < 0) {
- if (movepos)
- unlock_fh_pos(f);
- return r;
- } else
- return bl->length();
+ }
+ if (movepos) {
+ unlock_fh_pos(f);
+ }
+ return rc;
}
Client::C_Readahead::C_Readahead(Client *c, Fh *f) :
int Client::_read_async(Fh *f, uint64_t off, uint64_t len, bufferlist *bl)
{
- const md_config_t *conf = cct->_conf;
+ const auto& conf = cct->_conf;
Inode *in = f->inode.get();
- ldout(cct, 10) << "_read_async " << *in << " " << off << "~" << len << dendl;
+ ldout(cct, 10) << __func__ << " " << *in << " " << off << "~" << len << dendl;
// trim read based on file size?
if (off >= in->size)
<< " max_periods=" << conf->client_readahead_max_periods << dendl;
// read (and possibly block)
- int r, rvalue = 0;
- Mutex flock("Client::_read_async flock");
- Cond cond;
- bool done = false;
- Context *onfinish = new C_SafeCond(&flock, &cond, &done, &rvalue);
+ int r = 0;
+ C_SaferCond onfinish("Client::_read_async flock");
r = objectcacher->file_read(&in->oset, &in->layout, in->snapid,
- off, len, bl, 0, onfinish);
+ off, len, bl, 0, &onfinish);
if (r == 0) {
get_cap_ref(in, CEPH_CAP_FILE_CACHE);
- client_lock.Unlock();
- flock.Lock();
- while (!done)
- cond.Wait(flock);
- flock.Unlock();
- client_lock.Lock();
+ client_lock.unlock();
+ r = onfinish.wait();
+ client_lock.lock();
put_cap_ref(in, CEPH_CAP_FILE_CACHE);
- r = rvalue;
- } else {
- // it was cached.
- delete onfinish;
}
if(f->readahead.get_min_readahead_size() > 0) {
int left = len;
int read = 0;
- ldout(cct, 10) << "_read_sync " << *in << " " << off << "~" << len << dendl;
+ ldout(cct, 10) << __func__ << " " << *in << " " << off << "~" << len << dendl;
- Mutex flock("Client::_read_sync flock");
- Cond cond;
while (left > 0) {
- int r = 0;
- bool done = false;
- Context *onfinish = new C_SafeCond(&flock, &cond, &done, &r);
+ C_SaferCond onfinish("Client::_read_sync flock");
bufferlist tbl;
int wanted = left;
filer->read_trunc(in->ino, &in->layout, in->snapid,
pos, left, &tbl, 0,
in->truncate_size, in->truncate_seq,
- onfinish);
- client_lock.Unlock();
- flock.Lock();
- while (!done)
- cond.Wait(flock);
- flock.Unlock();
- client_lock.Lock();
+ &onfinish);
+ client_lock.unlock();
+ int r = onfinish.wait();
+ client_lock.lock();
// if we get ENOENT from OSD, assume 0 bytes returned
if (r == -ENOENT)
int64_t some = in->size - pos;
if (some > left)
some = left;
- bufferptr z(some);
- z.zero();
- bl->push_back(z);
+ auto z = buffer::ptr_node::create(some);
+ z->zero();
+ bl->push_back(std::move(z));
read += some;
pos += some;
left -= some;
*/
void Client::_sync_write_commit(Inode *in)
{
- assert(unsafe_sync_write > 0);
+ ceph_assert(unsafe_sync_write > 0);
unsafe_sync_write--;
put_cap_ref(in, CEPH_CAP_FILE_BUFFER);
- ldout(cct, 15) << "sync_write_commit unsafe_sync_write = " << unsafe_sync_write << dendl;
+ ldout(cct, 15) << __func__ << " unsafe_sync_write = " << unsafe_sync_write << dendl;
if (unsafe_sync_write == 0 && unmounting) {
- ldout(cct, 10) << "sync_write_commit -- no more unsafe writes, unmount can proceed" << dendl;
- mount_cond.Signal();
+ ldout(cct, 10) << __func__ << " -- no more unsafe writes, unmount can proceed" << dendl;
+ mount_cond.notify_all();
}
}
int Client::write(int fd, const char *buf, loff_t size, loff_t offset)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "write" << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << size << std::endl;
if (fh->flags & O_PATH)
return -EBADF;
#endif
- int r = _write(fh, offset, size, buf, NULL, 0);
+ /* We can't return bytes written larger than INT_MAX, clamp size to that */
+ size = std::min(size, (loff_t)INT_MAX);
+ int r = _write(fh, offset, size, buf, NULL, false);
ldout(cct, 3) << "write(" << fd << ", \"...\", " << size << ", " << offset << ") = " << r << dendl;
return r;
}
return _preadv_pwritev(fd, iov, iovcnt, offset, true);
}
-int Client::_preadv_pwritev(int fd, const struct iovec *iov, unsigned iovcnt, int64_t offset, bool write)
+int64_t Client::_preadv_pwritev_locked(Fh *fh, const struct iovec *iov,
+ unsigned iovcnt, int64_t offset, bool write,
+ bool clamp_to_int)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << fd << std::endl;
- tout(cct) << offset << std::endl;
-
- if (unmounting)
- return -ENOTCONN;
-
- Fh *fh = get_filehandle(fd);
- if (!fh)
- return -EBADF;
#if defined(__linux__) && defined(O_PATH)
if (fh->flags & O_PATH)
return -EBADF;
for (unsigned i = 0; i < iovcnt; i++) {
totallen += iov[i].iov_len;
}
+
+ /*
+ * Some of the API functions take 64-bit size values, but only return
+ * 32-bit signed integers. Clamp the I/O sizes in those functions so that
+ * we don't do I/Os larger than the values we can return.
+ */
+ if (clamp_to_int) {
+ totallen = std::min(totallen, (loff_t)INT_MAX);
+ }
if (write) {
- int w = _write(fh, offset, totallen, NULL, iov, iovcnt);
- ldout(cct, 3) << "pwritev(" << fd << ", \"...\", " << totallen << ", " << offset << ") = " << w << dendl;
+ int64_t w = _write(fh, offset, totallen, NULL, iov, iovcnt);
+ ldout(cct, 3) << "pwritev(" << fh << ", \"...\", " << totallen << ", " << offset << ") = " << w << dendl;
return w;
} else {
bufferlist bl;
- int r = _read(fh, offset, totallen, &bl);
- ldout(cct, 3) << "preadv(" << fd << ", " << offset << ") = " << r << dendl;
+ int64_t r = _read(fh, offset, totallen, &bl);
+ ldout(cct, 3) << "preadv(" << fh << ", " << offset << ") = " << r << dendl;
if (r <= 0)
return r;
- int bufoff = 0;
+ auto iter = bl.cbegin();
for (unsigned j = 0, resid = r; j < iovcnt && resid > 0; j++) {
/*
* This piece of code aims to handle the case that bufferlist does not have enough data
* to fill in the iov
*/
- if (resid < iov[j].iov_len) {
- bl.copy(bufoff, resid, (char *)iov[j].iov_base);
- break;
- } else {
- bl.copy(bufoff, iov[j].iov_len, (char *)iov[j].iov_base);
- }
- resid -= iov[j].iov_len;
- bufoff += iov[j].iov_len;
+ const auto round_size = std::min<unsigned>(resid, iov[j].iov_len);
+ iter.copy(round_size, reinterpret_cast<char*>(iov[j].iov_base));
+ resid -= round_size;
+ /* iter is self-updating */
}
return r;
}
}
-int Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf,
- const struct iovec *iov, int iovcnt)
+int Client::_preadv_pwritev(int fd, const struct iovec *iov, unsigned iovcnt, int64_t offset, bool write)
+{
+ std::lock_guard lock(client_lock);
+ tout(cct) << fd << std::endl;
+ tout(cct) << offset << std::endl;
+
+ if (unmounting)
+ return -ENOTCONN;
+
+ Fh *fh = get_filehandle(fd);
+ if (!fh)
+ return -EBADF;
+ return _preadv_pwritev_locked(fh, iov, iovcnt, offset, write, true);
+}
+
+int64_t Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf,
+ const struct iovec *iov, int iovcnt)
{
+ uint64_t fpos = 0;
+
if ((uint64_t)(offset+size) > mdsmap->get_max_filesize()) //too large!
return -EFBIG;
return -ENOSPC;
}
- assert(in->snapid == CEPH_NOSNAP);
+ ceph_assert(in->snapid == CEPH_NOSNAP);
// was Fh opened as writeable?
if ((f->mode & CEPH_FILE_MODE_WR) == 0)
return -EBADF;
- // check quota
- uint64_t endoff = offset + size;
- std::list<InodeRef> quota_roots;
- if (endoff > in->size &&
-    is_quota_bytes_exceeded(in, endoff - in->size, f->actor_perms, &quota_roots)) {
- return -EDQUOT;
- }
-
// use/adjust fd pos?
if (offset < 0) {
lock_fh_pos(f);
* change out from under us.
*/
if (f->flags & O_APPEND) {
- int r = _lseek(f, 0, SEEK_END);
+ auto r = _lseek(f, 0, SEEK_END);
if (r < 0) {
unlock_fh_pos(f);
return r;
}
}
offset = f->pos;
- f->pos = offset+size;
+ fpos = offset+size;
unlock_fh_pos(f);
}
+ // check quota
+ uint64_t endoff = offset + size;
+ if (endoff > in->size && is_quota_bytes_exceeded(in, endoff - in->size,
+ f->actor_perms)) {
+ return -EDQUOT;
+ }
+
//bool lazy = f->mode == CEPH_FILE_MODE_LAZY;
ldout(cct, 10) << "cur file size is " << in->size << dendl;
int r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true);
if (r < 0)
return r;
- assert(in->inline_version > 0);
+ ceph_assert(in->inline_version > 0);
}
// copy into fresh buffer (since our write may be resub, async)
utime_t lat;
uint64_t totalwritten;
- int have;
- int r = get_caps(in, CEPH_CAP_FILE_WR|CEPH_CAP_AUTH_SHARED,
- CEPH_CAP_FILE_BUFFER, &have, endoff);
+ int want, have;
+ if (f->mode & CEPH_FILE_MODE_LAZY)
+ want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
+ else
+ want = CEPH_CAP_FILE_BUFFER;
+ int r = get_caps(f, CEPH_CAP_FILE_WR|CEPH_CAP_AUTH_SHARED, want, &have, endoff);
if (r < 0)
return r;
}
if (f->flags & O_DIRECT)
- have &= ~CEPH_CAP_FILE_BUFFER;
+ have &= ~(CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO);
ldout(cct, 10) << " snaprealm " << *in->snaprealm << dendl;
- Mutex uninline_flock("Client::_write_uninline_data flock");
- Cond uninline_cond;
- bool uninline_done = false;
- int uninline_ret = 0;
- Context *onuninline = NULL;
-
+ std::unique_ptr<C_SaferCond> onuninline = nullptr;
+
if (in->inline_version < CEPH_INLINE_NONE) {
if (endoff > cct->_conf->client_max_inline_size ||
endoff > CEPH_INLINE_MAX_SIZE ||
!(have & CEPH_CAP_FILE_BUFFER)) {
- onuninline = new C_SafeCond(&uninline_flock,
- &uninline_cond,
- &uninline_done,
- &uninline_ret);
- uninline_data(in, onuninline);
+ onuninline.reset(new C_SaferCond("Client::_write_uninline_data flock"));
+ uninline_data(in, onuninline.get());
} else {
get_cap_ref(in, CEPH_CAP_FILE_BUFFER);
uint32_t len = in->inline_data.length();
if (endoff < len)
- in->inline_data.copy(endoff, len - endoff, bl);
+ in->inline_data.begin(endoff).copy(len - endoff, bl); // XXX
if (offset < len)
in->inline_data.splice(offset, len - offset);
}
}
- if (cct->_conf->client_oc && (have & CEPH_CAP_FILE_BUFFER)) {
+ if (cct->_conf->client_oc &&
+ (have & (CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO))) {
// do buffered write
if (!in->oset.dirty_or_tx)
get_cap_ref(in, CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER);
_flush_range(in, offset, size);
// simple, non-atomic sync write
- Mutex flock("Client::_write flock");
- Cond cond;
- bool done = false;
- Context *onfinish = new C_SafeCond(&flock, &cond, &done);
-
+ C_SaferCond onfinish("Client::_write flock");
unsafe_sync_write++;
get_cap_ref(in, CEPH_CAP_FILE_BUFFER); // released by onsafe callback
filer->write_trunc(in->ino, &in->layout, in->snaprealm->get_snap_context(),
offset, size, bl, ceph::real_clock::now(), 0,
in->truncate_size, in->truncate_seq,
- onfinish);
- client_lock.Unlock();
- flock.Lock();
-
- while (!done)
- cond.Wait(flock);
- flock.Unlock();
- client_lock.Lock();
+ &onfinish);
+ client_lock.unlock();
+ r = onfinish.wait();
+ client_lock.lock();
_sync_write_commit(in);
+ if (r < 0)
+ goto done;
}
// if we get here, write was successful, update client metadata
lat -= start;
logger->tinc(l_c_wrlat, lat);
+ if (fpos) {
+ lock_fh_pos(f);
+ f->pos = fpos;
+ unlock_fh_pos(f);
+ }
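+  // (Deliberate ordering: for pos-relative writes f->pos is only advanced
+  // here, after the write succeeded, so a failed sync write does not move
+  // the file position.)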
totalwritten = size;
- r = (int)totalwritten;
+ r = (int64_t)totalwritten;
// extend file?
if (totalwritten + offset > in->size) {
in->size = totalwritten + offset;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
- if (is_quota_bytes_approaching(in, quota_roots)) {
+ if (is_quota_bytes_approaching(in, f->actor_perms)) {
check_caps(in, CHECK_CAPS_NODELAY);
} else if (is_max_size_approaching(in)) {
check_caps(in, 0);
done:
- if (onuninline) {
- client_lock.Unlock();
- uninline_flock.Lock();
- while (!uninline_done)
- uninline_cond.Wait(uninline_flock);
- uninline_flock.Unlock();
- client_lock.Lock();
+ if (nullptr != onuninline) {
+ client_lock.unlock();
+ int uninline_ret = onuninline->wait();
+ client_lock.lock();
if (uninline_ret >= 0 || uninline_ret == -ECANCELED) {
in->inline_data.clear();
int Client::ftruncate(int fd, loff_t length, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "ftruncate" << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << length << std::endl;
if (f->flags & O_PATH)
return -EBADF;
#endif
+ if ((f->mode & CEPH_FILE_MODE_WR) == 0)
+ return -EBADF;
struct stat attr;
attr.st_size = length;
return _setattr(f->inode, &attr, CEPH_SETATTR_SIZE, perms);
int Client::fsync(int fd, bool syncdataonly)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "fsync" << std::endl;
tout(cct) << fd << std::endl;
tout(cct) << syncdataonly << std::endl;
int Client::_fsync(Inode *in, bool syncdataonly)
{
int r = 0;
- Mutex lock("Client::_fsync::lock");
- Cond cond;
- bool done = false;
- C_SafeCond *object_cacher_completion = NULL;
+ std::unique_ptr<C_SaferCond> object_cacher_completion = nullptr;
ceph_tid_t flush_tid = 0;
InodeRef tmp_ref;
+ utime_t lat;
+ utime_t start = ceph_clock_now();
ldout(cct, 8) << "_fsync on " << *in << " " << (syncdataonly ? "(dataonly)":"(data+metadata)") << dendl;
if (cct->_conf->client_oc) {
- object_cacher_completion = new C_SafeCond(&lock, &cond, &done, &r);
- tmp_ref = in; // take a reference; C_SafeCond doesn't and _flush won't either
- _flush(in, object_cacher_completion);
+ object_cacher_completion.reset(new C_SaferCond("Client::_fsync::lock"));
+ tmp_ref = in; // take a reference; C_SaferCond doesn't and _flush won't either
+ _flush(in, object_cacher_completion.get());
ldout(cct, 15) << "using return-valued form of _fsync" << dendl;
}
put_request(req);
}
- if (object_cacher_completion) { // wait on a real reply instead of guessing
- client_lock.Unlock();
- lock.Lock();
+ if (nullptr != object_cacher_completion) { // wait on a real reply instead of guessing
+ client_lock.unlock();
ldout(cct, 15) << "waiting on data to flush" << dendl;
- while (!done)
- cond.Wait(lock);
- lock.Unlock();
- client_lock.Lock();
+ r = object_cacher_completion->wait();
+ client_lock.lock();
ldout(cct, 15) << "got " << r << " from flush writeback" << dendl;
} else {
// FIXME: this can starve
ldout(cct, 8) << "ino " << in->ino << " failed to commit to disk! "
<< cpp_strerror(-r) << dendl;
}
+
+ lat = ceph_clock_now();
+ lat -= start;
+ logger->tinc(l_c_fsync, lat);
return r;
}
int Client::fstat(int fd, struct stat *stbuf, const UserPerm& perms, int mask)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "fstat mask " << hex << mask << dec << std::endl;
tout(cct) << fd << std::endl;
int Client::fstatx(int fd, struct ceph_statx *stx, const UserPerm& perms,
unsigned int want, unsigned int flags)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "fstatx flags " << hex << flags << " want " << want << dec << std::endl;
tout(cct) << fd << std::endl;
int Client::chdir(const char *relpath, std::string &new_cwd,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "chdir" << std::endl;
tout(cct) << relpath << std::endl;
int r = path_walk(path, &in, perms);
if (r < 0)
return r;
+
+ if (!(in.get()->is_dir()))
+ return -ENOTDIR;
+
if (cwd != in)
cwd.swap(in);
ldout(cct, 3) << "chdir(" << relpath << ") cwd now " << cwd->ino << dendl;
void Client::_getcwd(string& dir, const UserPerm& perms)
{
filepath path;
- ldout(cct, 10) << "getcwd " << *cwd << dendl;
+ ldout(cct, 10) << __func__ << " " << *cwd << dendl;
Inode *in = cwd.get();
while (in != root) {
- assert(in->dn_set.size() < 2); // dirs can't be hard-linked
+ ceph_assert(in->dentries.size() < 2); // dirs can't be hard-linked
// A cwd or ancester is unlinked
- if (in->dn_set.empty()) {
+ if (in->dentries.empty()) {
return;
}
if (!dn) {
// look it up
- ldout(cct, 10) << "getcwd looking up parent for " << *in << dendl;
+ ldout(cct, 10) << __func__ << " looking up parent for " << *in << dendl;
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPNAME);
filepath path(in->ino);
req->set_filepath(path);
void Client::getcwd(string& dir, const UserPerm& perms)
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
if (!unmounting)
_getcwd(dir, perms);
}
int Client::statfs(const char *path, struct statvfs *stbuf,
const UserPerm& perms)
{
- Mutex::Locker l(client_lock);
- tout(cct) << "statfs" << std::endl;
+ std::lock_guard l(client_lock);
+ tout(cct) << __func__ << std::endl;
unsigned long int total_files_on_fs;
if (unmounting)
objecter->get_fs_stats(stats, boost::optional<int64_t>(), &cond);
}
- client_lock.Unlock();
+ client_lock.unlock();
int rval = cond.wait();
assert(root);
total_files_on_fs = root->rstat.rfiles + root->rstat.rsubdirs;
- client_lock.Lock();
+ client_lock.lock();
if (rval < 0) {
ldout(cct, 1) << "underlying call to statfs returned error: "
// Usually quota_root will == root_ancestor, but if the mount root has no
// quota but we can see a parent of it that does have a quota, we'll
// respect that one instead.
- assert(root != nullptr);
+ ceph_assert(root != nullptr);
Inode *quota_root = root->quota.is_enable() ? root : get_quota_root(root, perms);
// get_quota_root should always give us something
// because client quotas are always enabled
- assert(quota_root != nullptr);
+ ceph_assert(quota_root != nullptr);
if (quota_root && cct->_conf->client_quota_df && quota_root->quota.max_bytes) {
int Client::_do_filelock(Inode *in, Fh *fh, int lock_type, int op, int sleep,
struct flock *fl, uint64_t owner, bool removing)
{
- ldout(cct, 10) << "_do_filelock ino " << in->ino
+ ldout(cct, 10) << __func__ << " ino " << in->ino
<< (lock_type == CEPH_LOCK_FCNTL ? " fcntl" : " flock")
<< " type " << fl->l_type << " owner " << owner
<< " " << fl->l_start << "~" << fl->l_len << dendl;
+ if (in->flags & I_ERROR_FILELOCK)
+ return -EIO;
+
int lock_cmd;
if (F_RDLCK == fl->l_type)
lock_cmd = CEPH_LOCK_SHARED;
if (ret == 0) {
if (op == CEPH_MDS_OP_GETFILELOCK) {
ceph_filelock filelock;
- bufferlist::iterator p = bl.begin();
- ::decode(filelock, p);
+ auto p = bl.cbegin();
+ decode(filelock, p);
if (CEPH_LOCK_SHARED == filelock.type)
fl->l_type = F_RDLCK;
ceph_lock_state_t *lock_state;
if (lock_type == CEPH_LOCK_FCNTL) {
if (!in->fcntl_locks)
- in->fcntl_locks = new ceph_lock_state_t(cct, CEPH_LOCK_FCNTL);
- lock_state = in->fcntl_locks;
+ in->fcntl_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FCNTL));
+ lock_state = in->fcntl_locks.get();
} else if (lock_type == CEPH_LOCK_FLOCK) {
if (!in->flock_locks)
- in->flock_locks = new ceph_lock_state_t(cct, CEPH_LOCK_FLOCK);
- lock_state = in->flock_locks;
+ in->flock_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FLOCK));
+ lock_state = in->flock_locks.get();
} else {
ceph_abort();
return -EINVAL;
if (!removing) {
if (lock_type == CEPH_LOCK_FCNTL) {
if (!fh->fcntl_locks)
- fh->fcntl_locks = new ceph_lock_state_t(cct, CEPH_LOCK_FCNTL);
- lock_state = fh->fcntl_locks;
+ fh->fcntl_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FCNTL));
+ lock_state = fh->fcntl_locks.get();
} else {
if (!fh->flock_locks)
- fh->flock_locks = new ceph_lock_state_t(cct, CEPH_LOCK_FLOCK);
- lock_state = fh->flock_locks;
+ fh->flock_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FLOCK));
+ lock_state = fh->flock_locks.get();
}
_update_lock_state(fl, owner, lock_state);
}
return;
unsigned nr_fcntl_locks = in->fcntl_locks ? in->fcntl_locks->held_locks.size() : 0;
- ::encode(nr_fcntl_locks, bl);
+ encode(nr_fcntl_locks, bl);
if (nr_fcntl_locks) {
- ceph_lock_state_t* lock_state = in->fcntl_locks;
+ auto &lock_state = in->fcntl_locks;
for(multimap<uint64_t, ceph_filelock>::iterator p = lock_state->held_locks.begin();
p != lock_state->held_locks.end();
++p)
- ::encode(p->second, bl);
+ encode(p->second, bl);
}
unsigned nr_flock_locks = in->flock_locks ? in->flock_locks->held_locks.size() : 0;
- ::encode(nr_flock_locks, bl);
+ encode(nr_flock_locks, bl);
if (nr_flock_locks) {
- ceph_lock_state_t* lock_state = in->flock_locks;
+ auto &lock_state = in->flock_locks;
for(multimap<uint64_t, ceph_filelock>::iterator p = lock_state->held_locks.begin();
p != lock_state->held_locks.end();
++p)
- ::encode(p->second, bl);
+ encode(p->second, bl);
}
- ldout(cct, 10) << "_encode_filelocks ino " << in->ino << ", " << nr_fcntl_locks
+ ldout(cct, 10) << __func__ << " ino " << in->ino << ", " << nr_fcntl_locks
<< " fcntl locks, " << nr_flock_locks << " flock locks" << dendl;
}
return;
Inode *in = fh->inode.get();
- ldout(cct, 10) << "_release_filelocks " << fh << " ino " << in->ino << dendl;
+ ldout(cct, 10) << __func__ << " " << fh << " ino " << in->ino << dendl;
+
+ list<ceph_filelock> activated_locks;
list<pair<int, ceph_filelock> > to_release;
if (fh->fcntl_locks) {
- ceph_lock_state_t* lock_state = fh->fcntl_locks;
- for(multimap<uint64_t, ceph_filelock>::iterator p = lock_state->held_locks.begin();
- p != lock_state->held_locks.end();
- ++p)
- to_release.push_back(pair<int, ceph_filelock>(CEPH_LOCK_FCNTL, p->second));
- delete fh->fcntl_locks;
+ auto &lock_state = fh->fcntl_locks;
+ for(auto p = lock_state->held_locks.begin(); p != lock_state->held_locks.end(); ) {
+ auto q = p++;
+ if (in->flags & I_ERROR_FILELOCK) {
+ lock_state->remove_lock(q->second, activated_locks);
+ } else {
+ to_release.push_back(pair<int, ceph_filelock>(CEPH_LOCK_FCNTL, q->second));
+ }
+ }
+ lock_state.reset();
}
if (fh->flock_locks) {
- ceph_lock_state_t* lock_state = fh->flock_locks;
- for(multimap<uint64_t, ceph_filelock>::iterator p = lock_state->held_locks.begin();
- p != lock_state->held_locks.end();
- ++p)
- to_release.push_back(pair<int, ceph_filelock>(CEPH_LOCK_FLOCK, p->second));
- delete fh->flock_locks;
+ auto &lock_state = fh->flock_locks;
+ for(auto p = lock_state->held_locks.begin(); p != lock_state->held_locks.end(); ) {
+ auto q = p++;
+ if (in->flags & I_ERROR_FILELOCK) {
+ lock_state->remove_lock(q->second, activated_locks);
+ } else {
+ to_release.push_back(pair<int, ceph_filelock>(CEPH_LOCK_FLOCK, q->second));
+ }
+ }
+ lock_state.reset();
}
+ if ((in->flags & I_ERROR_FILELOCK) && !in->has_any_filelocks())
+ in->flags &= ~I_ERROR_FILELOCK;
+
if (to_release.empty())
return;
lock_state->remove_lock(filelock, activated_locks);
} else {
bool r = lock_state->add_lock(filelock, false, false, NULL);
- assert(r);
+ ceph_assert(r);
}
}
return statfs(0, stbuf, perms);
}
-void Client::ll_register_callbacks(struct client_callback_args *args)
+void Client::ll_register_callbacks(struct ceph_client_callback_args *args)
{
if (!args)
return;
- Mutex::Locker l(client_lock);
- ldout(cct, 10) << "ll_register_callbacks cb " << args->handle
+ std::lock_guard l(client_lock);
+ ldout(cct, 10) << __func__ << " cb " << args->handle
<< " invalidate_ino_cb " << args->ino_cb
<< " invalidate_dentry_cb " << args->dentry_cb
<< " switch_interrupt_cb " << args->switch_intr_cb
remount_cb = args->remount_cb;
remount_finisher.start();
}
- umask_cb = args->umask_cb;
+ if (args->ino_release_cb) {
+ ino_release_cb = args->ino_release_cb;
+ async_ino_releasor.start();
+ }
+ if (args->umask_cb)
+ umask_cb = args->umask_cb;
}
int Client::test_dentry_handling(bool can_invalidate)
can_invalidate_dentries = can_invalidate;
if (can_invalidate_dentries) {
- assert(dentry_invalidate_cb);
+ ceph_assert(dentry_invalidate_cb);
ldout(cct, 1) << "using dentry_invalidate_cb" << dendl;
r = 0;
- } else if (remount_cb) {
+ } else {
+ ceph_assert(remount_cb);
ldout(cct, 1) << "using remount_cb" << dendl;
r = _do_remount(false);
}
- if (r) {
- bool should_abort = cct->_conf->get_val<bool>("client_die_on_failed_dentry_invalidate");
- if (should_abort) {
- lderr(cct) << "no method to invalidate kernel dentry cache; quitting!" << dendl;
- ceph_abort();
- } else {
- lderr(cct) << "no method to invalidate kernel dentry cache; expect issues!" << dendl;
- }
- }
+
return r;
}
int Client::_sync_fs()
{
- ldout(cct, 10) << "_sync_fs" << dendl;
+ ldout(cct, 10) << __func__ << dendl;
// flush file data
- Mutex lock("Client::_fsync::lock");
- Cond cond;
- bool flush_done = false;
- if (cct->_conf->client_oc)
- objectcacher->flush_all(new C_SafeCond(&lock, &cond, &flush_done));
- else
- flush_done = true;
+ std::unique_ptr<C_SaferCond> cond = nullptr;
+ if (cct->_conf->client_oc) {
+ cond.reset(new C_SaferCond("Client::_sync_fs:lock"));
+ objectcacher->flush_all(cond.get());
+ }
// flush caps
flush_caps_sync();
wait_sync_caps(flush_tid);
- if (!flush_done) {
- client_lock.Unlock();
- lock.Lock();
- ldout(cct, 15) << "waiting on data to flush" << dendl;
- while (!flush_done)
- cond.Wait(lock);
- lock.Unlock();
- client_lock.Lock();
+ if (nullptr != cond) {
+ client_lock.unlock();
+ ldout(cct, 15) << __func__ << " waiting on data to flush" << dendl;
+ cond->wait();
+ ldout(cct, 15) << __func__ << " flush finished" << dendl;
+ client_lock.lock();
}
return 0;
int Client::sync_fs()
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
if (unmounting)
return -ENOTCONN;
int64_t Client::drop_caches()
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
return objectcacher->release_all();
}
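+
+// Toggle LAZY I/O on an open file. Rough sketch of the accounting below: the
+// Fh mode gains or loses CEPH_FILE_MODE_LAZY, and the inode's open refs are
+// moved from the old mode to the new one so cap bookkeeping stays balanced
+// before caps are re-checked.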
+int Client::_lazyio(Fh *fh, int enable)
+{
+ Inode *in = fh->inode.get();
+ ldout(cct, 20) << __func__ << " " << *in << " " << !!enable << dendl;
+
+ if (!!(fh->mode & CEPH_FILE_MODE_LAZY) == !!enable)
+ return 0;
+
+ int orig_mode = fh->mode;
+ if (enable) {
+ fh->mode |= CEPH_FILE_MODE_LAZY;
+ in->get_open_ref(fh->mode);
+ in->put_open_ref(orig_mode);
+ check_caps(in, CHECK_CAPS_NODELAY);
+ } else {
+ fh->mode &= ~CEPH_FILE_MODE_LAZY;
+ in->get_open_ref(fh->mode);
+ in->put_open_ref(orig_mode);
+ check_caps(in, 0);
+ }
+
+ return 0;
+}
+
+int Client::lazyio(int fd, int enable)
+{
+ std::lock_guard l(client_lock);
+ Fh *f = get_filehandle(fd);
+ if (!f)
+ return -EBADF;
+
+ return _lazyio(f, enable);
+}
+
+int Client::ll_lazyio(Fh *fh, int enable)
+{
+ std::lock_guard lock(client_lock);
+ ldout(cct, 3) << __func__ << " " << fh << " " << fh->inode->ino << " " << !!enable << dendl;
+ tout(cct) << __func__ << std::endl;
+
+ return _lazyio(fh, enable);
+}
-int Client::lazyio_propogate(int fd, loff_t offset, size_t count)
+int Client::lazyio_propagate(int fd, loff_t offset, size_t count)
{
- Mutex::Locker l(client_lock);
- ldout(cct, 3) << "op: client->lazyio_propogate(" << fd
+ std::lock_guard l(client_lock);
+ ldout(cct, 3) << "op: client->lazyio_propagate(" << fd
<< ", " << offset << ", " << count << ")" << dendl;
Fh *f = get_filehandle(fd);
int Client::lazyio_synchronize(int fd, loff_t offset, size_t count)
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
ldout(cct, 3) << "op: client->lazyio_synchronize(" << fd
<< ", " << offset << ", " << count << ")" << dendl;
Inode *in = f->inode.get();
_fsync(f, true);
- if (_release(in))
- check_caps(in, 0);
+ if (_release(in)) {
+    int r = _getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms);
+ if (r < 0)
+ return r;
+ }
return 0;
}
int Client::mksnap(const char *relpath, const char *name, const UserPerm& perm)
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::rmsnap(const char *relpath, const char *name, const UserPerm& perms)
{
- Mutex::Locker l(client_lock);
+ std::lock_guard l(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::get_caps_issued(int fd) {
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::get_caps_issued(const char *path, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
in->mode = diri->mode;
in->uid = diri->uid;
in->gid = diri->gid;
+ in->nlink = 1;
in->mtime = diri->mtime;
in->ctime = diri->ctime;
in->btime = diri->btime;
+ in->atime = diri->atime;
in->size = diri->size;
in->change_attr = diri->change_attr;
int Client::ll_lookup(Inode *parent, const char *name, struct stat *attr,
Inode **out, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
vinodeno_t vparent = _get_vino(parent);
- ldout(cct, 3) << "ll_lookup " << vparent << " " << name << dendl;
- tout(cct) << "ll_lookup" << std::endl;
+ ldout(cct, 3) << __func__ << " " << vparent << " " << name << dendl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << name << std::endl;
if (unmounting)
return -ENOTCONN;
int r = 0;
- if (!cct->_conf->fuse_default_permissions) {
- r = may_lookup(parent, perms);
- if (r < 0)
- return r;
+ if (!fuse_default_permissions) {
+ if (strcmp(name, ".") && strcmp(name, "..")) {
+ r = may_lookup(parent, perms);
+ if (r < 0)
+ return r;
+ }
}
string dname(name);
goto out;
}
- assert(in);
+ ceph_assert(in);
fill_stat(in, attr);
_ll_get(in.get());
out:
- ldout(cct, 3) << "ll_lookup " << vparent << " " << name
+ ldout(cct, 3) << __func__ << " " << vparent << " " << name
<< " -> " << r << " (" << hex << attr->st_ino << dec << ")" << dendl;
tout(cct) << attr->st_ino << std::endl;
*out = in.get();
return r;
}
-int Client::ll_lookup_inode(
- struct inodeno_t ino,
+int Client::ll_lookup_vino(
+ vinodeno_t vino,
const UserPerm& perms,
Inode **inode)
{
- Mutex::Locker lock(client_lock);
- ldout(cct, 3) << "ll_lookup_inode " << ino << dendl;
-
- // Num1: get inode and *inode
- int r = _lookup_ino(ino, perms, inode);
- if (r) {
- return r;
- }
- assert(inode != NULL);
- assert(*inode != NULL);
+ ceph_assert(inode != NULL);
- // Num2: Request the parent inode, so that we can look up the name
- Inode *parent;
- r = _lookup_parent(*inode, perms, &parent);
- if (r && r != -EINVAL) {
- // Unexpected error
- _ll_forget(*inode, 1);
- return r;
- } else if (r == -EINVAL) {
- // EINVAL indicates node without parents (root), drop out now
- // and don't try to look up the non-existent dentry.
+ if (unmounting)
+ return -ENOTCONN;
+
+ if (is_reserved_vino(vino))
+ return -ESTALE;
+
+ std::lock_guard lock(client_lock);
+  ldout(cct, 3) << __func__ << " " << vino << dendl;
+
+ // Check the cache first
+ unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
+ if (p != inode_map.end()) {
+ *inode = p->second;
+ _ll_get(*inode);
return 0;
}
- // FIXME: I don't think this works; lookup_parent() returns 0 if the parent
- // is already in cache
- assert(parent != NULL);
- // Num3: Finally, get the name (dentry) of the requested inode
- r = _lookup_name(*inode, parent, perms);
- if (r) {
- // Unexpected error
- _ll_forget(parent, 1);
- _ll_forget(*inode, 1);
+ uint64_t snapid = vino.snapid;
+
+ // for snapdir, find the non-snapped dir inode
+ if (snapid == CEPH_SNAPDIR)
+ vino.snapid = CEPH_NOSNAP;
+
+ int r = _lookup_vino(vino, perms, inode);
+ if (r)
return r;
- }
+ ceph_assert(*inode != NULL);
- _ll_forget(parent, 1);
+ if (snapid == CEPH_SNAPDIR) {
+ Inode *tmp = *inode;
+
+ // open the snapdir and put the inode ref
+ *inode = open_snapdir(tmp);
+ _ll_forget(tmp, 1);
+ _ll_get(*inode);
+ }
return 0;
}
+int Client::ll_lookup_inode(
+ struct inodeno_t ino,
+ const UserPerm& perms,
+ Inode **inode)
+{
+ vinodeno_t vino(ino, CEPH_NOSNAP);
+ return ll_lookup_vino(vino, perms, inode);
+}
+
int Client::ll_lookupx(Inode *parent, const char *name, Inode **out,
struct ceph_statx *stx, unsigned want, unsigned flags,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
vinodeno_t vparent = _get_vino(parent);
- ldout(cct, 3) << "ll_lookupx " << vparent << " " << name << dendl;
+ ldout(cct, 3) << __func__ << " " << vparent << " " << name << dendl;
tout(cct) << "ll_lookupx" << std::endl;
tout(cct) << name << std::endl;
return -ENOTCONN;
int r = 0;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
r = may_lookup(parent, perms);
if (r < 0)
return r;
stx->stx_ino = 0;
stx->stx_mask = 0;
} else {
- assert(in);
+ ceph_assert(in);
fill_statx(in, mask, stx);
_ll_get(in.get());
}
- ldout(cct, 3) << "ll_lookupx " << vparent << " " << name
+ ldout(cct, 3) << __func__ << " " << vparent << " " << name
<< " -> " << r << " (" << hex << stx->stx_ino << dec << ")" << dendl;
tout(cct) << stx->stx_ino << std::endl;
*out = in.get();
int Client::ll_walk(const char* name, Inode **out, struct ceph_statx *stx,
unsigned int want, unsigned int flags, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int rc;
unsigned mask = statx_to_mask(flags, want);
- ldout(cct, 3) << "ll_walk" << name << dendl;
- tout(cct) << "ll_walk" << std::endl;
+ ldout(cct, 3) << __func__ << " " << name << dendl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << name << std::endl;
rc = path_walk(fp, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), mask);
*out = NULL;
return rc;
} else {
- assert(in);
+ ceph_assert(in);
fill_statx(in, mask, stx);
_ll_get(in.get());
*out = in.get();
{
if (in->ll_ref == 0) {
in->get();
- if (in->is_dir() && !in->dn_set.empty()) {
- assert(in->dn_set.size() == 1); // dirs can't be hard-linked
+ if (in->is_dir() && !in->dentries.empty()) {
+ ceph_assert(in->dentries.size() == 1); // dirs can't be hard-linked
in->get_first_parent()->get(); // pin dentry
}
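+    // count ll refs per snapid so ll_get_snap_ref() can report whether a
+    // snapshot inode is still referenced through the ll interface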
+ if (in->snapid != CEPH_NOSNAP)
+ ll_snap_ref[in->snapid]++;
}
in->ll_get();
- ldout(cct, 20) << "_ll_get " << in << " " << in->ino << " -> " << in->ll_ref << dendl;
+ ldout(cct, 20) << __func__ << " " << in << " " << in->ino << " -> " << in->ll_ref << dendl;
}
-int Client::_ll_put(Inode *in, int num)
+int Client::_ll_put(Inode *in, uint64_t num)
{
in->ll_put(num);
- ldout(cct, 20) << "_ll_put " << in << " " << in->ino << " " << num << " -> " << in->ll_ref << dendl;
+ ldout(cct, 20) << __func__ << " " << in << " " << in->ino << " " << num << " -> " << in->ll_ref << dendl;
if (in->ll_ref == 0) {
- if (in->is_dir() && !in->dn_set.empty()) {
- assert(in->dn_set.size() == 1); // dirs can't be hard-linked
+ if (in->is_dir() && !in->dentries.empty()) {
+ ceph_assert(in->dentries.size() == 1); // dirs can't be hard-linked
in->get_first_parent()->put(); // unpin dentry
}
+ if (in->snapid != CEPH_NOSNAP) {
+ auto p = ll_snap_ref.find(in->snapid);
+ ceph_assert(p != ll_snap_ref.end());
+ ceph_assert(p->second > 0);
+ if (--p->second == 0)
+ ll_snap_ref.erase(p);
+ }
put_inode(in);
return 0;
} else {
void Client::_ll_drop_pins()
{
- ldout(cct, 10) << "_ll_drop_pins" << dendl;
+ ldout(cct, 10) << __func__ << dendl;
std::set<InodeRef> to_be_put; //this set will be deconstructed item by item when exit
ceph::unordered_map<vinodeno_t, Inode*>::iterator next;
for (ceph::unordered_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
}
}
-bool Client::_ll_forget(Inode *in, int count)
+bool Client::_ll_forget(Inode *in, uint64_t count)
{
- inodeno_t ino = _get_inodeno(in);
+ inodeno_t ino = in->ino;
- ldout(cct, 8) << "ll_forget " << ino << " " << count << dendl;
- tout(cct) << "ll_forget" << std::endl;
+ ldout(cct, 8) << __func__ << " " << ino << " " << count << dendl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << ino.val << std::endl;
tout(cct) << count << std::endl;
return last;
}
-bool Client::ll_forget(Inode *in, int count)
+bool Client::ll_forget(Inode *in, uint64_t count)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
return _ll_forget(in, count);
}
return ll_forget(in, 1);
}
+int Client::ll_get_snap_ref(snapid_t snap)
+{
+ std::lock_guard lock(client_lock);
+ auto p = ll_snap_ref.find(snap);
+ if (p != ll_snap_ref.end())
+ return p->second;
+ return 0;
+}
+
snapid_t Client::ll_get_snapid(Inode *in)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
return in->snapid;
}
Inode *Client::ll_get_inode(ino_t ino)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return NULL;
Inode *Client::ll_get_inode(vinodeno_t vino)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return NULL;
+ if (is_reserved_vino(vino))
+ return NULL;
+
unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
if (p == inode_map.end())
return NULL;
{
vinodeno_t vino = _get_vino(in);
- ldout(cct, 8) << "ll_getattr " << vino << dendl;
- tout(cct) << "ll_getattr" << std::endl;
+ ldout(cct, 8) << __func__ << " " << vino << dendl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << vino.ino.val << std::endl;
if (vino.snapid < CEPH_NOSNAP)
int Client::ll_getattr(Inode *in, struct stat *attr, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
if (res == 0)
fill_stat(in, attr);
- ldout(cct, 3) << "ll_getattr " << _get_vino(in) << " = " << res << dendl;
+ ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl;
return res;
}
int Client::ll_getattrx(Inode *in, struct ceph_statx *stx, unsigned int want,
unsigned int flags, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
if (res == 0)
fill_statx(in, mask, stx);
- ldout(cct, 3) << "ll_getattrx " << _get_vino(in) << " = " << res << dendl;
+ ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl;
return res;
}
{
vinodeno_t vino = _get_vino(in);
- ldout(cct, 8) << "ll_setattrx " << vino << " mask " << hex << mask << dec
+ ldout(cct, 8) << __func__ << " " << vino << " mask " << hex << mask << dec
<< dendl;
- tout(cct) << "ll_setattrx" << std::endl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << vino.ino.val << std::endl;
tout(cct) << stx->stx_mode << std::endl;
tout(cct) << stx->stx_uid << std::endl;
tout(cct) << stx->stx_btime << std::endl;
tout(cct) << mask << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int res = may_setattr(in, stx, mask, perms);
if (res < 0)
return res;
int Client::ll_setattrx(Inode *in, struct ceph_statx *stx, int mask,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
InodeRef target(in);
int res = _ll_setattrx(in, stx, mask, perms, &target);
if (res == 0) {
- assert(in == target.get());
+ ceph_assert(in == target.get());
fill_statx(in, in->caps_issued(), stx);
}
- ldout(cct, 3) << "ll_setattrx " << _get_vino(in) << " = " << res << dendl;
+ ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl;
return res;
}
struct ceph_statx stx;
stat_to_statx(attr, &stx);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
InodeRef target(in);
int res = _ll_setattrx(in, &stx, mask, perms, &target);
if (res == 0) {
- assert(in == target.get());
+ ceph_assert(in == target.get());
fill_stat(in, attr);
}
- ldout(cct, 3) << "ll_setattr " << _get_vino(in) << " = " << res << dendl;
+ ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl;
return res;
}
int Client::getxattr(const char *path, const char *name, void *value, size_t size,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::lgetxattr(const char *path, const char *name, void *value, size_t size,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::fgetxattr(int fd, const char *name, void *value, size_t size,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::listxattr(const char *path, char *list, size_t size,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::llistxattr(const char *path, char *list, size_t size,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::flistxattr(int fd, char *list, size_t size, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::removexattr(const char *path, const char *name,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::lremovexattr(const char *path, const char *name,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::fremovexattr(int fd, const char *name, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
{
_setxattr_maybe_wait_for_osdmap(name, value, size);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
{
_setxattr_maybe_wait_for_osdmap(name, value, size);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
{
_setxattr_maybe_wait_for_osdmap(name, value, size);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
if (vxattr->flags & VXATTR_RSTAT) {
flags |= CEPH_STAT_RSTAT;
}
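+  // dirstat-backed vxattrs (entries/files/subdirs) need Fs caps so the
+  // directory stats are current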
+ if (vxattr->flags & VXATTR_DIRSTAT) {
+ flags |= CEPH_CAP_FILE_SHARED;
+ }
r = _getattr(in, flags, perms, true);
if (r != 0) {
// Error from getattr!
int Client::ll_getxattr(Inode *in, const char *name, void *value,
size_t size, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
vinodeno_t vino = _get_vino(in);
- ldout(cct, 3) << "ll_getxattr " << vino << " " << name << " size " << size << dendl;
- tout(cct) << "ll_getxattr" << std::endl;
+ ldout(cct, 3) << __func__ << " " << vino << " " << name << " size " << size << dendl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = xattr_permission(in, name, MAY_READ, perms);
if (r < 0)
return r;
int Client::_listxattr(Inode *in, char *name, size_t size,
const UserPerm& perms)
{
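+  // size == 0 means the caller only wants the buffer length the names
+  // would need (standard listxattr(2) behaviour)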
+ bool len_only = (size == 0);
int r = _getattr(in, CEPH_STAT_CAP_XATTR, perms, in->xattr_version == 0);
- if (r == 0) {
- for (map<string,bufferptr>::iterator p = in->xattrs.begin();
- p != in->xattrs.end();
- ++p)
- r += p->first.length() + 1;
+ if (r != 0) {
+ goto out;
+ }
- const VXattr *vxattrs = _get_vxattrs(in);
- r += _vxattrs_name_size(vxattrs);
+ r = 0;
+ for (const auto& p : in->xattrs) {
+ size_t this_len = p.first.length() + 1;
+ r += this_len;
+ if (len_only)
+ continue;
- if (size != 0) {
- if (size >= (unsigned)r) {
- for (map<string,bufferptr>::iterator p = in->xattrs.begin();
- p != in->xattrs.end();
- ++p) {
- memcpy(name, p->first.c_str(), p->first.length());
- name += p->first.length();
- *name = '\0';
- name++;
- }
- if (vxattrs) {
- for (int i = 0; !vxattrs[i].name.empty(); i++) {
- const VXattr& vxattr = vxattrs[i];
- if (vxattr.hidden)
- continue;
- // call pointer-to-member function
- if(vxattr.exists_cb && !(this->*(vxattr.exists_cb))(in))
- continue;
- memcpy(name, vxattr.name.c_str(), vxattr.name.length());
- name += vxattr.name.length();
- *name = '\0';
- name++;
- }
- }
- } else
- r = -ERANGE;
+ if (this_len > size) {
+ r = -ERANGE;
+ goto out;
}
+
+ memcpy(name, p.first.c_str(), this_len);
+ name += this_len;
+ size -= this_len;
}
- ldout(cct, 8) << "_listxattr(" << in->ino << ", " << size << ") = " << r << dendl;
+out:
+ ldout(cct, 8) << __func__ << "(" << in->ino << ", " << size << ") = " << r << dendl;
return r;
}
int Client::ll_listxattr(Inode *in, char *names, size_t size,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
vinodeno_t vino = _get_vino(in);
- ldout(cct, 3) << "ll_listxattr " << vino << " size " << size << dendl;
- tout(cct) << "ll_listxattr" << std::endl;
+ ldout(cct, 3) << __func__ << " " << vino << " size " << size << dendl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << vino.ino.val << std::endl;
tout(cct) << size << std::endl;
req->head.args.setxattr.flags = xattr_flags;
bufferlist bl;
+  ceph_assert(value || size == 0);
bl.append((const char*)value, size);
req->set_data(bl);
int res = make_request(req, perms);
trim_cache();
- ldout(cct, 3) << "_setxattr(" << in->ino << ", \"" << name << "\") = " <<
+ ldout(cct, 3) << __func__ << "(" << in->ino << ", \"" << name << "\") = " <<
res << dendl;
return res;
}
return -EROFS;
}
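+  // normalize the value: a zero-length xattr is stored as the empty string,
+  // while a NULL pointer with a non-zero size is a caller error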
+ if (size == 0) {
+ value = "";
+ } else if (value == NULL) {
+ return -EINVAL;
+ }
+
bool posix_acl_xattr = false;
if (acl_type == POSIX_ACL)
posix_acl_xattr = !strncmp(name, "system.", 7);
!posix_acl_xattr)
return -EOPNOTSUPP;
+ bool check_realm = false;
+
if (posix_acl_xattr) {
if (!strcmp(name, ACL_EA_ACCESS)) {
mode_t new_mode = in->mode;
}
} else {
const VXattr *vxattr = _match_vxattr(in, name);
- if (vxattr && vxattr->readonly)
- return -EOPNOTSUPP;
+ if (vxattr) {
+ if (vxattr->readonly)
+ return -EOPNOTSUPP;
+ if (vxattr->name.compare(0, 10, "ceph.quota") == 0 && value)
+ check_realm = true;
+ }
+ }
+
+ int ret = _do_setxattr(in, name, value, size, flags, perms);
+ if (ret >= 0 && check_realm) {
+ // check if snaprealm was created for quota inode
+ if (in->quota.is_enable() &&
+ !(in->snaprealm && in->snaprealm->ino == in->ino))
+ ret = -EOPNOTSUPP;
}
- return _do_setxattr(in, name, value, size, flags, perms);
+ return ret;
}
int Client::_setxattr(InodeRef &in, const char *name, const void *value,
{
_setxattr_maybe_wait_for_osdmap(name, value, size);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
vinodeno_t vino = _get_vino(in);
- ldout(cct, 3) << "ll_setxattr " << vino << " " << name << " size " << size << dendl;
- tout(cct) << "ll_setxattr" << std::endl;
+ ldout(cct, 3) << __func__ << " " << vino << " " << name << " size " << size << dendl;
+ tout(cct) << __func__ << std::endl;
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = xattr_permission(in, name, MAY_WRITE, perms);
if (r < 0)
return r;
int Client::ll_removexattr(Inode *in, const char *name, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = xattr_permission(in, name, MAY_WRITE, perms);
if (r < 0)
return r;
bool Client::_vxattrcb_quota_exists(Inode *in)
{
- return in->quota.is_enable();
+ return in->quota.is_enable() &&
+ (in->snapid != CEPH_NOSNAP ||
+ (in->snaprealm && in->snaprealm->ino == in->ino));
}
size_t Client::_vxattrcb_quota(Inode *in, char *val, size_t size)
{
size_t Client::_vxattrcb_layout(Inode *in, char *val, size_t size)
{
int r = snprintf(val, size,
- "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=",
+ "stripe_unit=%llu stripe_count=%llu object_size=%llu pool=",
(unsigned long long)in->layout.stripe_unit,
(unsigned long long)in->layout.stripe_count,
(unsigned long long)in->layout.object_size);
}
size_t Client::_vxattrcb_layout_stripe_unit(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->layout.stripe_unit);
+ return snprintf(val, size, "%llu", (unsigned long long)in->layout.stripe_unit);
}
size_t Client::_vxattrcb_layout_stripe_count(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->layout.stripe_count);
+ return snprintf(val, size, "%llu", (unsigned long long)in->layout.stripe_count);
}
size_t Client::_vxattrcb_layout_object_size(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->layout.object_size);
+ return snprintf(val, size, "%llu", (unsigned long long)in->layout.object_size);
}
size_t Client::_vxattrcb_layout_pool(Inode *in, char *val, size_t size)
{
}
size_t Client::_vxattrcb_dir_entries(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)(in->dirstat.nfiles + in->dirstat.nsubdirs));
+ return snprintf(val, size, "%llu", (unsigned long long)(in->dirstat.nfiles + in->dirstat.nsubdirs));
}
size_t Client::_vxattrcb_dir_files(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->dirstat.nfiles);
+ return snprintf(val, size, "%llu", (unsigned long long)in->dirstat.nfiles);
}
size_t Client::_vxattrcb_dir_subdirs(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->dirstat.nsubdirs);
+ return snprintf(val, size, "%llu", (unsigned long long)in->dirstat.nsubdirs);
}
size_t Client::_vxattrcb_dir_rentries(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)(in->rstat.rfiles + in->rstat.rsubdirs));
+ return snprintf(val, size, "%llu", (unsigned long long)(in->rstat.rfiles + in->rstat.rsubdirs));
}
size_t Client::_vxattrcb_dir_rfiles(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->rstat.rfiles);
+ return snprintf(val, size, "%llu", (unsigned long long)in->rstat.rfiles);
}
size_t Client::_vxattrcb_dir_rsubdirs(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->rstat.rsubdirs);
+ return snprintf(val, size, "%llu", (unsigned long long)in->rstat.rsubdirs);
}
size_t Client::_vxattrcb_dir_rbytes(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%lld", (unsigned long long)in->rstat.rbytes);
+ return snprintf(val, size, "%llu", (unsigned long long)in->rstat.rbytes);
}
size_t Client::_vxattrcb_dir_rctime(Inode *in, char *val, size_t size)
{
- return snprintf(val, size, "%ld.09%ld", (long)in->rstat.rctime.sec(),
+ return snprintf(val, size, "%ld.%09ld", (long)in->rstat.rctime.sec(),
(long)in->rstat.rctime.nsec());
}
+bool Client::_vxattrcb_dir_pin_exists(Inode *in)
+{
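+  // -ENODATA is the "no export pin set" sentinel; any other value exposes the vxattr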
+ return in->dir_pin != -ENODATA;
+}
+size_t Client::_vxattrcb_dir_pin(Inode *in, char *val, size_t size)
+{
+ return snprintf(val, size, "%ld", (long)in->dir_pin);
+}
+
+bool Client::_vxattrcb_snap_btime_exists(Inode *in)
+{
+ return !in->snap_btime.is_zero();
+}
+
+size_t Client::_vxattrcb_snap_btime(Inode *in, char *val, size_t size)
+{
+ return snprintf(val, size, "%llu.%09lu",
+ (long long unsigned)in->snap_btime.sec(),
+ (long unsigned)in->snap_btime.nsec());
+}
+
+size_t Client::_vxattrcb_cluster_fsid(Inode *in, char *val, size_t size)
+{
+ return snprintf(val, size, "%s", monclient->get_fsid().to_string().c_str());
+}
+
+size_t Client::_vxattrcb_client_id(Inode *in, char *val, size_t size)
+{
+ auto name = messenger->get_myname();
+ return snprintf(val, size, "%s%ld", name.type_str(), name.num());
+}
#define CEPH_XATTR_NAME(_type, _name) "ceph." #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) "ceph." #_type "." #_name "." #_name2
-#define XATTR_NAME_CEPH(_type, _name) \
-{ \
- name: CEPH_XATTR_NAME(_type, _name), \
- getxattr_cb: &Client::_vxattrcb_ ## _type ## _ ## _name, \
- readonly: true, \
- hidden: false, \
- exists_cb: NULL, \
- flags: 0, \
-}
-#define XATTR_NAME_CEPH2(_type, _name, _flags) \
+#define XATTR_NAME_CEPH(_type, _name, _flags) \
{ \
name: CEPH_XATTR_NAME(_type, _name), \
getxattr_cb: &Client::_vxattrcb_ ## _type ## _ ## _name, \
readonly: true, \
- hidden: false, \
exists_cb: NULL, \
flags: _flags, \
}
name: CEPH_XATTR_NAME2(_type, _name, _field), \
getxattr_cb: &Client::_vxattrcb_ ## _name ## _ ## _field, \
readonly: false, \
- hidden: true, \
exists_cb: &Client::_vxattrcb_layout_exists, \
flags: 0, \
}
name: CEPH_XATTR_NAME(_type, _name), \
getxattr_cb: &Client::_vxattrcb_ ## _type ## _ ## _name, \
readonly: false, \
- hidden: true, \
exists_cb: &Client::_vxattrcb_quota_exists, \
flags: 0, \
}
name: "ceph.dir.layout",
getxattr_cb: &Client::_vxattrcb_layout,
readonly: false,
- hidden: true,
exists_cb: &Client::_vxattrcb_layout_exists,
flags: 0,
},
XATTR_LAYOUT_FIELD(dir, layout, object_size),
XATTR_LAYOUT_FIELD(dir, layout, pool),
XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
- XATTR_NAME_CEPH(dir, entries),
- XATTR_NAME_CEPH(dir, files),
- XATTR_NAME_CEPH(dir, subdirs),
- XATTR_NAME_CEPH2(dir, rentries, VXATTR_RSTAT),
- XATTR_NAME_CEPH2(dir, rfiles, VXATTR_RSTAT),
- XATTR_NAME_CEPH2(dir, rsubdirs, VXATTR_RSTAT),
- XATTR_NAME_CEPH2(dir, rbytes, VXATTR_RSTAT),
- XATTR_NAME_CEPH2(dir, rctime, VXATTR_RSTAT),
+ XATTR_NAME_CEPH(dir, entries, VXATTR_DIRSTAT),
+ XATTR_NAME_CEPH(dir, files, VXATTR_DIRSTAT),
+ XATTR_NAME_CEPH(dir, subdirs, VXATTR_DIRSTAT),
+ XATTR_NAME_CEPH(dir, rentries, VXATTR_RSTAT),
+ XATTR_NAME_CEPH(dir, rfiles, VXATTR_RSTAT),
+ XATTR_NAME_CEPH(dir, rsubdirs, VXATTR_RSTAT),
+ XATTR_NAME_CEPH(dir, rbytes, VXATTR_RSTAT),
+ XATTR_NAME_CEPH(dir, rctime, VXATTR_RSTAT),
{
name: "ceph.quota",
getxattr_cb: &Client::_vxattrcb_quota,
readonly: false,
- hidden: true,
exists_cb: &Client::_vxattrcb_quota_exists,
flags: 0,
},
XATTR_QUOTA_FIELD(quota, max_bytes),
XATTR_QUOTA_FIELD(quota, max_files),
+ {
+ name: "ceph.dir.pin",
+ getxattr_cb: &Client::_vxattrcb_dir_pin,
+ readonly: false,
+ exists_cb: &Client::_vxattrcb_dir_pin_exists,
+ flags: 0,
+ },
+ {
+ name: "ceph.snap.btime",
+ getxattr_cb: &Client::_vxattrcb_snap_btime,
+ readonly: true,
+ exists_cb: &Client::_vxattrcb_snap_btime_exists,
+ flags: 0,
+ },
{ name: "" } /* Required table terminator */
};
name: "ceph.file.layout",
getxattr_cb: &Client::_vxattrcb_layout,
readonly: false,
- hidden: true,
exists_cb: &Client::_vxattrcb_layout_exists,
flags: 0,
},
XATTR_LAYOUT_FIELD(file, layout, object_size),
XATTR_LAYOUT_FIELD(file, layout, pool),
XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
+ {
+ name: "ceph.snap.btime",
+ getxattr_cb: &Client::_vxattrcb_snap_btime,
+ readonly: true,
+ exists_cb: &Client::_vxattrcb_snap_btime_exists,
+ flags: 0,
+ },
+ { name: "" } /* Required table terminator */
+};
+
+const Client::VXattr Client::_common_vxattrs[] = {
+ {
+ name: "ceph.cluster_fsid",
+ getxattr_cb: &Client::_vxattrcb_cluster_fsid,
+ readonly: true,
+ exists_cb: nullptr,
+ flags: 0,
+ },
+ {
+ name: "ceph.client_id",
+ getxattr_cb: &Client::_vxattrcb_client_id,
+ readonly: true,
+ exists_cb: nullptr,
+ flags: 0,
+ },
{ name: "" } /* Required table terminator */
};
vxattr++;
}
}
- }
- return NULL;
-}
-size_t Client::_vxattrs_calcu_name_size(const VXattr *vxattr)
-{
- size_t len = 0;
- while (!vxattr->name.empty()) {
- if (!vxattr->hidden)
- len += vxattr->name.length() + 1;
- vxattr++;
+ // for common vxattrs
+ vxattr = _common_vxattrs;
+ while (!vxattr->name.empty()) {
+ if (vxattr->name == name)
+ return vxattr;
+ vxattr++;
+ }
}
- return len;
+
+ return NULL;
}
int Client::ll_readlink(Inode *in, char *buf, size_t buflen, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << "ll_readlink" << std::endl;
tout(cct) << vino.ino.val << std::endl;
- set<Dentry*>::iterator dn = in->dn_set.begin();
- while (dn != in->dn_set.end()) {
- touch_dn(*dn);
- ++dn;
+ for (auto dn : in->dentries) {
+ touch_dn(dn);
}
int r = _readlink(in, buf, buflen); // FIXME: no permission checking!
dev_t rdev, struct stat *attr, Inode **out,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << mode << std::endl;
tout(cct) << rdev << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_create(parent, perms);
if (r < 0)
return r;
const UserPerm& perms)
{
unsigned caps = statx_to_mask(flags, want);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << mode << std::endl;
tout(cct) << rdev << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_create(parent, perms);
if (r < 0)
return r;
}
// use normalized flags to generate cmode
- int cmode = ceph_flags_to_mode(ceph_flags_sys2wire(flags));
- if (cmode < 0)
- return -EINVAL;
+ int cflags = ceph_flags_sys2wire(flags);
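+  // the client_force_lazyio option promotes every open to LAZY I/O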
+ if (cct->_conf.get_val<bool>("client_force_lazyio"))
+ cflags |= CEPH_O_LAZY;
+
+ int cmode = ceph_flags_to_mode(cflags);
int64_t pool_id = -1;
if (data_pool && *data_pool) {
path.push_dentry(name);
req->set_filepath(path);
req->set_inode(dir);
- req->head.args.open.flags = ceph_flags_sys2wire(flags | O_CREAT);
+ req->head.args.open.flags = cflags | CEPH_O_CREAT;
req->head.args.open.stripe_unit = stripe_unit;
req->head.args.open.stripe_count = stripe_count;
int Client::ll_mkdir(Inode *parent, const char *name, mode_t mode,
struct stat *attr, Inode **out, const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << name << std::endl;
tout(cct) << mode << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_create(parent, perm);
if (r < 0)
return r;
struct ceph_statx *stx, unsigned want, unsigned flags,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << name << std::endl;
tout(cct) << mode << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_create(parent, perms);
if (r < 0)
return r;
int Client::ll_symlink(Inode *parent, const char *name, const char *value,
struct stat *attr, Inode **out, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << name << std::endl;
tout(cct) << value << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_create(parent, perms);
if (r < 0)
return r;
Inode **out, struct ceph_statx *stx, unsigned want,
unsigned flags, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << name << std::endl;
tout(cct) << value << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_create(parent, perms);
if (r < 0)
return r;
int Client::ll_unlink(Inode *in, const char *name, const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_delete(in, name, perm);
if (r < 0)
return r;
dir->make_nosnap_relative_path(path);
path.push_dentry(name);
req->set_filepath(path);
+ req->set_inode(dir);
req->dentry_drop = CEPH_CAP_FILE_SHARED;
req->dentry_unless = CEPH_CAP_FILE_EXCL;
res = _lookup(dir, name, 0, &in, perms);
if (res < 0)
goto fail;
- if (op == CEPH_MDS_OP_RMDIR) {
- req->set_inode(dir);
- req->set_other_inode(in.get());
- } else {
+
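+  // for rmsnap the snapped dentry is dropped locally before the request;
+  // in both cases the looked-up target becomes the request's other inode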
+ if (op == CEPH_MDS_OP_RMSNAP) {
unlink(de, true, true);
de->put();
- req->set_other_inode(in.get());
}
+ req->set_other_inode(in.get());
res = make_request(req, perms);
int Client::ll_rmdir(Inode *in, const char *name, const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << vino.ino.val << std::endl;
tout(cct) << name << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_delete(in, name, perms);
if (r < 0)
return r;
else
return -EROFS;
}
- if (fromdir != todir) {
- Inode *fromdir_root =
- fromdir->quota.is_enable() ? fromdir : get_quota_root(fromdir, perm);
- Inode *todir_root =
- todir->quota.is_enable() ? todir : get_quota_root(todir, perm);
- if (fromdir_root != todir_root) {
- return -EXDEV;
- }
- }
InodeRef target;
MetaRequest *req = new MetaRequest(op);
req->dentry_unless = CEPH_CAP_FILE_EXCL;
InodeRef oldin, otherin;
- res = _lookup(fromdir, fromname, 0, &oldin, perm);
+ Inode *fromdir_root = nullptr;
+ Inode *todir_root = nullptr;
+ int mask = 0;
+ bool quota_check = false;
+ if (fromdir != todir) {
+ fromdir_root =
+ fromdir->quota.is_enable() ? fromdir : get_quota_root(fromdir, perm);
+ todir_root =
+ todir->quota.is_enable() ? todir : get_quota_root(todir, perm);
+
+ if (todir_root->quota.is_enable() && fromdir_root != todir_root) {
+ // use CEPH_STAT_RSTAT mask to force send getattr or lookup request
+ // to auth MDS to get latest rstat for todir_root and source dir
+ // even if their dentry caches and inode caps are satisfied.
+ res = _getattr(todir_root, CEPH_STAT_RSTAT, perm, true);
+ if (res < 0)
+ goto fail;
+
+ quota_check = true;
+ if (oldde->inode && oldde->inode->is_dir()) {
+ mask |= CEPH_STAT_RSTAT;
+ }
+ }
+ }
+
+ res = _lookup(fromdir, fromname, mask, &oldin, perm);
if (res < 0)
goto fail;
req->set_old_inode(oldinode);
req->old_inode_drop = CEPH_CAP_LINK_SHARED;
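+  // with a fresh rstat on the destination quota root (fetched above), fail
+  // the rename early if moving the source would exceed its byte/file quota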
+ if (quota_check) {
+ int64_t old_bytes, old_files;
+ if (oldinode->is_dir()) {
+ old_bytes = oldinode->rstat.rbytes;
+ old_files = oldinode->rstat.rsize();
+ } else {
+ old_bytes = oldinode->size;
+ old_files = 1;
+ }
+
+ bool quota_exceed = false;
+ if (todir_root && todir_root->quota.max_bytes &&
+ (old_bytes + todir_root->rstat.rbytes) >= todir_root->quota.max_bytes) {
+ ldout(cct, 10) << "_rename (" << oldinode->ino << " bytes="
+ << old_bytes << ") to (" << todir->ino
+ << ") will exceed quota on " << *todir_root << dendl;
+ quota_exceed = true;
+ }
+
+ if (todir_root && todir_root->quota.max_files &&
+ (old_files + todir_root->rstat.rsize()) >= todir_root->quota.max_files) {
+ ldout(cct, 10) << "_rename (" << oldinode->ino << " files="
+ << old_files << ") to (" << todir->ino
+ << ") will exceed quota on " << *todir_root << dendl;
+ quota_exceed = true;
+ }
+
+ if (quota_exceed) {
+ res = (oldinode->is_dir()) ? -EXDEV : -EDQUOT;
+ goto fail;
+ }
+ }
+
res = _lookup(todir, toname, 0, &otherin, perm);
switch (res) {
case 0:
// dentry manually
unlink(oldde, true, true);
unlink(de, true, true);
+
+ req->set_inode(todir);
}
res = make_request(req, perm, &target);
int Client::ll_rename(Inode *parent, const char *name, Inode *newparent,
const char *newname, const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << vnewparent.ino.val << std::endl;
tout(cct) << newname << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_delete(parent, name, perm);
if (r < 0)
return r;
int Client::ll_link(Inode *in, Inode *newparent, const char *newname,
const UserPerm& perm)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << vnewparent << std::endl;
tout(cct) << newname << std::endl;
- int r = 0;
InodeRef target;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
if (S_ISDIR(in->mode))
return -EPERM;
- r = may_hardlink(in, perm);
+ int r = may_hardlink(in, perm);
if (r < 0)
return r;
int Client::ll_num_osds(void)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
return objecter->with_osdmap(std::mem_fn(&OSDMap::get_num_osds));
}
int Client::ll_osdaddr(int osd, uint32_t *addr)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
entity_addr_t g;
bool exists = objecter->with_osdmap([&](const OSDMap& o) {
if (!o.exists(osd))
return false;
- g = o.get_addr(osd);
+ g = o.get_addrs(osd).front();
return true;
});
if (!exists)
uint32_t Client::ll_stripe_unit(Inode *in)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
return in->layout.stripe_unit;
}
uint64_t Client::ll_snap_seq(Inode *in)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
return in->snaprealm->seq;
}
int Client::ll_file_layout(Inode *in, file_layout_t *layout)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
*layout = in->layout;
return 0;
}
int Client::ll_get_stripe_osd(Inode *in, uint64_t blockno,
file_layout_t* layout)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
inodeno_t ino = in->ino;
uint32_t object_size = layout->object_size;
uint32_t su = layout->stripe_unit;
uint32_t stripe_count = layout->stripe_count;
uint64_t stripes_per_object = object_size / su;
+ uint64_t stripeno = 0, stripepos = 0;
- uint64_t stripeno = blockno / stripe_count; // which horizontal stripe (Y)
- uint64_t stripepos = blockno % stripe_count; // which object in the object set (X)
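+  // a layout with stripe_count == 0 would divide by zero; treat it as stripe 0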
+  if (stripe_count) {
+ stripeno = blockno / stripe_count; // which horizontal stripe (Y)
+ stripepos = blockno % stripe_count; // which object in the object set (X)
+ }
uint64_t objectsetno = stripeno / stripes_per_object; // which object set
uint64_t objectno = objectsetno * stripe_count + stripepos; // object id
uint64_t Client::ll_get_internal_offset(Inode *in, uint64_t blockno)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
file_layout_t *layout=&(in->layout);
uint32_t object_size = layout->object_size;
uint32_t su = layout->stripe_unit;
int Client::ll_opendir(Inode *in, int flags, dir_result_t** dirpp,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << "ll_opendir" << std::endl;
tout(cct) << vino.ino.val << std::endl;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
int r = may_open(in, flags, perms);
if (r < 0)
return r;
int Client::ll_releasedir(dir_result_t *dirp)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_releasedir " << dirp << dendl;
tout(cct) << "ll_releasedir" << std::endl;
tout(cct) << (unsigned long)dirp << std::endl;
int Client::ll_fsyncdir(dir_result_t *dirp)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_fsyncdir " << dirp << dendl;
tout(cct) << "ll_fsyncdir" << std::endl;
tout(cct) << (unsigned long)dirp << std::endl;
int Client::ll_open(Inode *in, int flags, Fh **fhp, const UserPerm& perms)
{
- assert(!(flags & O_CREAT));
+ ceph_assert(!(flags & O_CREAT));
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
tout(cct) << ceph_flags_sys2wire(flags) << std::endl;
int r;
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
r = may_open(in, flags, perms);
if (r < 0)
goto out;
return -EEXIST;
if (r == -ENOENT && (flags & O_CREAT)) {
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
r = may_create(parent, perms);
if (r < 0)
goto out;
if (r < 0)
goto out;
- assert(*in);
+ ceph_assert(*in);
ldout(cct, 20) << "_ll_create created = " << created << dendl;
if (!created) {
- if (!cct->_conf->fuse_default_permissions) {
+ if (!fuse_default_permissions) {
r = may_open(in->get(), flags, perms);
if (r < 0) {
if (*fhp) {
int release_r = _release_fh(*fhp);
- assert(release_r == 0); // during create, no async data ops should have happened
+ ceph_assert(release_r == 0); // during create, no async data ops should have happened
}
goto out;
}
int flags, struct stat *attr, Inode **outp, Fh **fhp,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
InodeRef in;
if (unmounting)
int r = _ll_create(parent, name, mode, flags, &in, CEPH_STAT_CAP_INODE_ALL,
fhp, perms);
if (r >= 0) {
- assert(in);
+ ceph_assert(in);
// passing an Inode in outp requires an additional ref
if (outp) {
const UserPerm& perms)
{
unsigned caps = statx_to_mask(lflags, want);
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
InodeRef in;
if (unmounting)
int r = _ll_create(parent, name, mode, oflags, &in, caps, fhp, perms);
if (r >= 0) {
- assert(in);
+ ceph_assert(in);
// passing an Inode in outp requires an additional ref
if (outp) {
loff_t Client::ll_lseek(Fh *fh, loff_t offset, int whence)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
tout(cct) << "ll_lseek" << std::endl;
tout(cct) << offset << std::endl;
tout(cct) << whence << std::endl;
int Client::ll_read(Fh *fh, loff_t off, loff_t len, bufferlist *bl)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_read " << fh << " " << fh->inode->ino << " " << " " << off << "~" << len << dendl;
tout(cct) << "ll_read" << std::endl;
tout(cct) << (unsigned long)fh << std::endl;
if (unmounting)
return -ENOTCONN;
- return _read(fh, off, len, bl);
+  /* We can't return more than INT_MAX bytes read, clamp len to that */
+ len = std::min(len, (loff_t)INT_MAX);
+ int r = _read(fh, off, len, bl);
+ ldout(cct, 3) << "ll_read " << fh << " " << off << "~" << len << " = " << r
+ << dendl;
+ return r;
}
int Client::ll_read_block(Inode *in, uint64_t blockid,
uint64_t length,
file_layout_t* layout)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
CEPH_OSD_FLAG_READ,
&onfinish);
- client_lock.Unlock();
+ client_lock.unlock();
int r = onfinish.wait();
- client_lock.Lock();
+ client_lock.lock();
if (r >= 0) {
- bl.copy(0, bl.length(), buf);
+ bl.begin().copy(bl.length(), buf);
r = bl.length();
}
uint64_t length, file_layout_t* layout,
uint64_t snapseq, uint32_t sync)
{
- Mutex flock("Client::ll_write_block flock");
vinodeno_t vino = ll_get_vino(in);
- Cond cond;
- bool done;
int r = 0;
- Context *onsafe = nullptr;
-
+ std::unique_ptr<C_SaferCond> onsafe = nullptr;
+
if (length == 0) {
return -EINVAL;
}
if (true || sync) {
/* if write is stable, the epilogue is waiting on
* flock */
- onsafe = new C_SafeCond(&flock, &cond, &done, &r);
- done = false;
- } else {
- /* if write is unstable, we just place a barrier for
- * future commits to wait on */
- /*onsafe = new C_Block_Sync(this, vino.ino,
- barrier_interval(offset, offset + length), &r);
- */
- done = true;
+ onsafe.reset(new C_SaferCond("Client::ll_write_block flock"));
}
object_t oid = file_object_t(vino.ino, blockid);
SnapContext fakesnap;
- bufferptr bp;
- if (length > 0) bp = buffer::copy(buf, length);
- bufferlist bl;
- bl.push_back(bp);
+ ceph::bufferlist bl;
+ if (length > 0) {
+ bl.push_back(buffer::copy(buf, length));
+ }
ldout(cct, 1) << "ll_block_write for " << vino.ino << "." << blockid
<< dendl;
fakesnap.seq = snapseq;
/* lock just in time */
- client_lock.Lock();
+ client_lock.lock();
if (unmounting) {
- client_lock.Unlock();
- delete onsafe;
+ client_lock.unlock();
return -ENOTCONN;
}
bl,
ceph::real_clock::now(),
0,
- onsafe);
+ onsafe.get());
- client_lock.Unlock();
- if (!done /* also !sync */) {
- flock.Lock();
- while (! done)
- cond.Wait(flock);
- flock.Unlock();
+ client_lock.unlock();
+ if (nullptr != onsafe) {
+ r = onsafe->wait();
}
if (r < 0) {
uint64_t offset,
uint64_t length)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
/*
BarrierContext *bctx;
vinodeno_t vino = _get_vino(in);
int Client::ll_write(Fh *fh, loff_t off, loff_t len, const char *data)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_write " << fh << " " << fh->inode->ino << " " << off <<
"~" << len << dendl;
tout(cct) << "ll_write" << std::endl;
if (unmounting)
return -ENOTCONN;
+ /* We can't return bytes written larger than INT_MAX, clamp len to that */
+ len = std::min(len, (loff_t)INT_MAX);
int r = _write(fh, off, len, data, NULL, 0);
ldout(cct, 3) << "ll_write " << fh << " " << off << "~" << len << " = " << r
<< dendl;
return r;
}
+int64_t Client::ll_writev(struct Fh *fh, const struct iovec *iov, int iovcnt, int64_t off)
+{
+ std::lock_guard lock(client_lock);
+ if (unmounting)
+ return -ENOTCONN;
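+  // the fifth argument to _preadv_pwritev_locked selects the direction:
+  // true for write here, false for read in ll_readv below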
+ return _preadv_pwritev_locked(fh, iov, iovcnt, off, true, false);
+}
+
+int64_t Client::ll_readv(struct Fh *fh, const struct iovec *iov, int iovcnt, int64_t off)
+{
+ std::lock_guard lock(client_lock);
+ if (unmounting)
+ return -ENOTCONN;
+ return _preadv_pwritev_locked(fh, iov, iovcnt, off, false, false);
+}
+
int Client::ll_flush(Fh *fh)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_flush " << fh << " " << fh->inode->ino << " " << dendl;
tout(cct) << "ll_flush" << std::endl;
tout(cct) << (unsigned long)fh << std::endl;
int Client::ll_fsync(Fh *fh, bool syncdataonly)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_fsync " << fh << " " << fh->inode->ino << " " << dendl;
tout(cct) << "ll_fsync" << std::endl;
tout(cct) << (unsigned long)fh << std::endl;
int Client::ll_sync_inode(Inode *in, bool syncdataonly)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_sync_inode " << *in << " " << dendl;
tout(cct) << "ll_sync_inode" << std::endl;
tout(cct) << (unsigned long)in << std::endl;
return -EBADF;
uint64_t size = offset + length;
- std::list<InodeRef> quota_roots;
if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
size > in->size &&
-      is_quota_bytes_exceeded(in, size - in->size, fh->actor_perms, &quota_roots)) {
+ is_quota_bytes_exceeded(in, size - in->size, fh->actor_perms)) {
return -EDQUOT;
}
int have;
- int r = get_caps(in, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER, &have, -1);
+ int r = get_caps(fh, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER, &have, -1);
if (r < 0)
return r;
- Mutex uninline_flock("Client::_fallocate_uninline_data flock");
- Cond uninline_cond;
- bool uninline_done = false;
- int uninline_ret = 0;
- Context *onuninline = NULL;
-
+ std::unique_ptr<C_SaferCond> onuninline = nullptr;
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (in->inline_version < CEPH_INLINE_NONE &&
(have & CEPH_CAP_FILE_BUFFER)) {
bufferlist bl;
+ auto inline_iter = in->inline_data.cbegin();
int len = in->inline_data.length();
if (offset < len) {
if (offset > 0)
- in->inline_data.copy(0, offset, bl);
+ inline_iter.copy(offset, bl);
int size = length;
if (offset + size > len)
size = len - offset;
if (size > 0)
bl.append_zero(size);
- if (offset + size < len)
- in->inline_data.copy(offset + size, len - offset - size, bl);
+ if (offset + size < len) {
+ inline_iter += size;
+ inline_iter.copy(len - offset - size, bl);
+ }
in->inline_data = bl;
in->inline_version++;
}
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
} else {
if (in->inline_version < CEPH_INLINE_NONE) {
- onuninline = new C_SafeCond(&uninline_flock,
- &uninline_cond,
- &uninline_done,
- &uninline_ret);
- uninline_data(in, onuninline);
+ onuninline.reset(new C_SaferCond("Client::_fallocate_uninline_data flock"));
+ uninline_data(in, onuninline.get());
}
- Mutex flock("Client::_punch_hole flock");
- Cond cond;
- bool done = false;
- Context *onfinish = new C_SafeCond(&flock, &cond, &done);
+ C_SaferCond onfinish("Client::_punch_hole flock");
unsafe_sync_write++;
get_cap_ref(in, CEPH_CAP_FILE_BUFFER);
in->snaprealm->get_snap_context(),
offset, length,
ceph::real_clock::now(),
- 0, true, onfinish);
+ 0, true, &onfinish);
in->mtime = in->ctime = ceph_clock_now();
in->change_attr++;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
- client_lock.Unlock();
- flock.Lock();
- while (!done)
- cond.Wait(flock);
- flock.Unlock();
- client_lock.Lock();
+ client_lock.unlock();
+ onfinish.wait();
+ client_lock.lock();
_sync_write_commit(in);
}
} else if (!(mode & FALLOC_FL_KEEP_SIZE)) {
in->change_attr++;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
- if (is_quota_bytes_approaching(in, quota_roots)) {
+ if (is_quota_bytes_approaching(in, fh->actor_perms)) {
check_caps(in, CHECK_CAPS_NODELAY);
} else if (is_max_size_approaching(in)) {
check_caps(in, 0);
}
}
- if (onuninline) {
- client_lock.Unlock();
- uninline_flock.Lock();
- while (!uninline_done)
- uninline_cond.Wait(uninline_flock);
- uninline_flock.Unlock();
- client_lock.Lock();
+ if (nullptr != onuninline) {
+ client_lock.unlock();
+ int ret = onuninline->wait();
+ client_lock.lock();
- if (uninline_ret >= 0 || uninline_ret == -ECANCELED) {
+ if (ret >= 0 || ret == -ECANCELED) {
in->inline_data.clear();
in->inline_version = CEPH_INLINE_NONE;
in->mark_caps_dirty(CEPH_CAP_FILE_WR);
check_caps(in, 0);
} else
- r = uninline_ret;
+ r = ret;
}
put_cap_ref(in, CEPH_CAP_FILE_WR);
#endif
-int Client::ll_fallocate(Fh *fh, int mode, loff_t offset, loff_t length)
+int Client::ll_fallocate(Fh *fh, int mode, int64_t offset, int64_t length)
{
- Mutex::Locker lock(client_lock);
- ldout(cct, 3) << "ll_fallocate " << fh << " " << fh->inode->ino << " " << dendl;
- tout(cct) << "ll_fallocate " << mode << " " << offset << " " << length << std::endl;
+ std::lock_guard lock(client_lock);
+ ldout(cct, 3) << __func__ << " " << fh << " " << fh->inode->ino << " " << dendl;
+ tout(cct) << __func__ << " " << mode << " " << offset << " " << length << std::endl;
tout(cct) << (unsigned long)fh << std::endl;
if (unmounting)
int Client::fallocate(int fd, int mode, loff_t offset, loff_t length)
{
- Mutex::Locker lock(client_lock);
- tout(cct) << "fallocate " << " " << fd << mode << " " << offset << " " << length << std::endl;
+ std::lock_guard lock(client_lock);
+ tout(cct) << __func__ << " " << " " << fd << mode << " " << offset << " " << length << std::endl;
if (unmounting)
return -ENOTCONN;
int Client::ll_release(Fh *fh)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
- ldout(cct, 3) << "ll_release (fh)" << fh << " " << fh->inode->ino << " " <<
+ ldout(cct, 3) << __func__ << " (fh)" << fh << " " << fh->inode->ino << " " <<
dendl;
- tout(cct) << "ll_release (fh)" << std::endl;
+ tout(cct) << __func__ << " (fh)" << std::endl;
tout(cct) << (unsigned long)fh << std::endl;
if (ll_unclosed_fh_set.count(fh))
int Client::ll_getlk(Fh *fh, struct flock *fl, uint64_t owner)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
ldout(cct, 3) << "ll_getlk (fh)" << fh << " " << fh->inode->ino << dendl;
tout(cct) << "ll_getk (fh)" << (unsigned long)fh << std::endl;
int Client::ll_setlk(Fh *fh, struct flock *fl, uint64_t owner, int sleep)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
- ldout(cct, 3) << "ll_setlk (fh) " << fh << " " << fh->inode->ino << dendl;
- tout(cct) << "ll_setk (fh)" << (unsigned long)fh << std::endl;
+ ldout(cct, 3) << __func__ << " (fh) " << fh << " " << fh->inode->ino << dendl;
+ tout(cct) << __func__ << " (fh)" << (unsigned long)fh << std::endl;
if (unmounting)
return -ENOTCONN;
int Client::ll_flock(Fh *fh, int cmd, uint64_t owner)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
- ldout(cct, 3) << "ll_flock (fh) " << fh << " " << fh->inode->ino << dendl;
- tout(cct) << "ll_flock (fh)" << (unsigned long)fh << std::endl;
+ ldout(cct, 3) << __func__ << " (fh) " << fh << " " << fh->inode->ino << dendl;
+ tout(cct) << __func__ << " (fh)" << (unsigned long)fh << std::endl;
if (unmounting)
return -ENOTCONN;
int Client::set_deleg_timeout(uint32_t timeout)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
/*
* The whole point is to prevent blacklisting so we must time out the
{
int ret = -EINVAL;
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (!mounted)
return -ENOTCONN;
default:
try {
ret = inode->set_deleg(fh, cmd, cb, priv);
- } catch (std::bad_alloc) {
+ } catch (std::bad_alloc&) {
ret = -ENOMEM;
}
break;
req->get();
}
void finish(int r) override {
- Mutex::Locker l(client->client_lock);
- assert(req->head.op == CEPH_MDS_OP_SETFILELOCK);
+ std::lock_guard l(client->client_lock);
+ ceph_assert(req->head.op == CEPH_MDS_OP_SETFILELOCK);
client->_interrupt_filelock(req);
client->put_request(req);
}
void Client::ll_interrupt(void *d)
{
MetaRequest *req = static_cast<MetaRequest*>(d);
- ldout(cct, 3) << "ll_interrupt tid " << req->get_tid() << dendl;
- tout(cct) << "ll_interrupt tid " << req->get_tid() << std::endl;
+ ldout(cct, 3) << __func__ << " tid " << req->get_tid() << dendl;
+ tout(cct) << __func__ << " tid " << req->get_tid() << std::endl;
interrupt_finisher.queue(new C_Client_RequestInterrupt(this, req));
}
int Client::describe_layout(const char *relpath, file_layout_t *lp,
const UserPerm& perms)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
*lp = in->layout;
- ldout(cct, 3) << "describe_layout(" << relpath << ") = 0" << dendl;
+ ldout(cct, 3) << __func__ << "(" << relpath << ") = 0" << dendl;
return 0;
}
int Client::fdescribe_layout(int fd, file_layout_t *lp)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
*lp = in->layout;
- ldout(cct, 3) << "fdescribe_layout(" << fd << ") = 0" << dendl;
+ ldout(cct, 3) << __func__ << "(" << fd << ") = 0" << dendl;
return 0;
}
int64_t Client::get_default_pool_id()
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int64_t Client::get_pool_id(const char *pool_name)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
string Client::get_pool_name(int64_t pool)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return string();
int Client::get_pool_replication(int64_t pool)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::get_file_extent_osds(int fd, loff_t off, loff_t *len, vector<int>& osds)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
vector<ObjectExtent> extents;
Striper::file_to_extents(cct, in->ino, &in->layout, off, 1, in->truncate_size, extents);
- assert(extents.size() == 1);
+ ceph_assert(extents.size() == 1);
objecter->with_osdmap([&](const OSDMap& o) {
pg_t pg = o.object_locator_to_pg(extents[0].oid, extents[0].oloc);
int Client::get_osd_crush_location(int id, vector<pair<string, string> >& path)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
int Client::get_file_stripe_address(int fd, loff_t offset,
vector<entity_addr_t>& address)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
vector<ObjectExtent> extents;
Striper::file_to_extents(cct, in->ino, &in->layout, offset, 1,
in->truncate_size, extents);
- assert(extents.size() == 1);
+ ceph_assert(extents.size() == 1);
// now we have the object and its 'layout'
return objecter->with_osdmap([&](const OSDMap& o) {
if (osds.empty())
return -EINVAL;
for (unsigned i = 0; i < osds.size(); i++) {
- entity_addr_t addr = o.get_addr(osds[i]);
+ entity_addr_t addr = o.get_addrs(osds[i]).front();
address.push_back(addr);
}
return 0;
int Client::get_osd_addr(int osd, entity_addr_t& addr)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
if (!o.exists(osd))
return -ENOENT;
- addr = o.get_addr(osd);
+ addr = o.get_addrs(osd).front();
return 0;
});
}
int Client::enumerate_layout(int fd, vector<ObjectExtent>& result,
loff_t length, loff_t offset)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
// map to a list of extents
Striper::file_to_extents(cct, in->ino, &in->layout, offset, length, in->truncate_size, result);
- ldout(cct, 3) << "enumerate_layout(" << fd << ", " << length << ", " << offset << ") = 0" << dendl;
+ ldout(cct, 3) << __func__ << "(" << fd << ", " << length << ", " << offset << ") = 0" << dendl;
return 0;
}
/* find an osd with the same ip. -ENXIO if none. */
int Client::get_local_osd()
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (unmounting)
return -ENOTCONN;
objecter->with_osdmap([this](const OSDMap& o) {
if (o.get_epoch() != local_osd_epoch) {
- local_osd = o.find_osd_on_ip(messenger->get_myaddr());
+ local_osd = o.find_osd_on_ip(messenger->get_myaddrs().front());
local_osd_epoch = o.get_epoch();
}
});
void Client::ms_handle_connect(Connection *con)
{
- ldout(cct, 10) << "ms_handle_connect on " << con->get_peer_addr() << dendl;
+ ldout(cct, 10) << __func__ << " on " << con->get_peer_addr() << dendl;
}
bool Client::ms_handle_reset(Connection *con)
{
- ldout(cct, 0) << "ms_handle_reset on " << con->get_peer_addr() << dendl;
+ ldout(cct, 0) << __func__ << " on " << con->get_peer_addr() << dendl;
return false;
}
void Client::ms_handle_remote_reset(Connection *con)
{
- ldout(cct, 0) << "ms_handle_remote_reset on " << con->get_peer_addr() << dendl;
- Mutex::Locker l(client_lock);
+ ldout(cct, 0) << __func__ << " on " << con->get_peer_addr() << dendl;
+ std::lock_guard l(client_lock);
switch (con->get_peer_type()) {
case CEPH_ENTITY_TYPE_MDS:
{
// kludge to figure out which mds this is; fixme with a Connection* state
mds_rank_t mds = MDS_RANK_NONE;
MetaSession *s = NULL;
- for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
- p != mds_sessions.end();
- ++p) {
- if (mdsmap->get_addr(p->first) == con->get_peer_addr()) {
- mds = p->first;
- s = p->second;
+ for (auto &p : mds_sessions) {
+ if (mdsmap->get_addrs(p.first) == con->get_peer_addrs()) {
+ mds = p.first;
+ s = &p.second;
}
}
if (mds >= 0) {
case MetaSession::STATE_OPEN:
{
objecter->maybe_request_map(); /* to check if we are blacklisted */
- const md_config_t *conf = cct->_conf;
- if (conf->client_reconnect_stale) {
+ if (cct->_conf.get_val<bool>("client_reconnect_stale")) {
ldout(cct, 1) << "reset from mds we were open; close mds session for reconnect" << dendl;
_closed_mds_session(s);
} else {
bool Client::ms_handle_refused(Connection *con)
{
- ldout(cct, 1) << "ms_handle_refused on " << con->get_peer_addr() << dendl;
+ ldout(cct, 1) << __func__ << " on " << con->get_peer_addr() << dendl;
return false;
}
-bool Client::ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new)
-{
- if (dest_type == CEPH_ENTITY_TYPE_MON)
- return true;
- *authorizer = monclient->build_authorizer(dest_type);
- return true;
-}
-
Inode *Client::get_quota_root(Inode *in, const UserPerm& perms)
{
- Inode *cur = in;
- utime_t now = ceph_clock_now();
-
- while (cur) {
- if (cur != in && cur->quota.is_enable())
- break;
+ Inode *quota_in = root_ancestor;
+ SnapRealm *realm = in->snaprealm;
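+  // a quota root always has its own snaprealm (see the ceph.quota handling
+  // in _setxattr), so walking realm parents visits every candidate quota root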
+ while (realm) {
+ ldout(cct, 10) << __func__ << " realm " << realm->ino << dendl;
+ if (realm->ino != in->ino) {
+ auto p = inode_map.find(vinodeno_t(realm->ino, CEPH_NOSNAP));
+ if (p == inode_map.end())
+ break;
- Inode *parent_in = NULL;
- if (!cur->dn_set.empty()) {
- for (auto p = cur->dn_set.begin(); p != cur->dn_set.end(); ++p) {
- Dentry *dn = *p;
- if (dn->lease_mds >= 0 &&
- dn->lease_ttl > now &&
- mds_sessions.count(dn->lease_mds)) {
- parent_in = dn->dir->parent_inode;
- } else {
- Inode *diri = dn->dir->parent_inode;
- if (diri->caps_issued_mask(CEPH_CAP_FILE_SHARED) &&
- diri->shared_gen == dn->cap_shared_gen) {
- parent_in = dn->dir->parent_inode;
- }
- }
- if (parent_in)
- break;
+ if (p->second->quota.is_enable()) {
+ quota_in = p->second;
+ break;
}
- } else if (root_parents.count(cur)) {
- parent_in = root_parents[cur].get();
- }
-
- if (parent_in) {
- cur = parent_in;
- continue;
- }
-
- if (cur == root_ancestor)
- break;
-
- // deleted inode
- if (cur->nlink == 0) {
- cur = root_ancestor;
- break;
- }
-
- MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPNAME);
- filepath path(cur->ino);
- req->set_filepath(path);
- req->set_inode(cur);
-
- InodeRef parent_ref;
- int ret = make_request(req, perms, &parent_ref);
- if (ret < 0) {
- ldout(cct, 1) << __func__ << " " << in->vino()
- << " failed to find parent of " << cur->vino()
- << " err " << ret << dendl;
- // FIXME: what to do?
- cur = root_ancestor;
- break;
}
-
- now = ceph_clock_now();
- if (cur == in)
- cur = parent_ref.get();
- else
- cur = in; // start over
+ realm = realm->pparent;
}
-
- ldout(cct, 10) << __func__ << " " << in->vino() << " -> " << cur->vino() << dendl;
- return cur;
+ ldout(cct, 10) << __func__ << " " << in->vino() << " -> " << quota_in->vino() << dendl;
+ return quota_in;
}
/**
std::function<bool (const Inode &in)> test)
{
while (true) {
- assert(in != NULL);
+ ceph_assert(in != NULL);
if (test(*in)) {
return true;
}
}
bool Client::is_quota_bytes_exceeded(Inode *in, int64_t new_bytes,
- const UserPerm& perms,
- std::list<InodeRef>* quota_roots)
+ const UserPerm& perms)
{
return check_quota_condition(in, perms,
- [&new_bytes, quota_roots](const Inode &in) {
- if (quota_roots)
- quota_roots->emplace_back(const_cast<Inode*>(&in));
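+ // the quota_roots out-param is dropped: is_quota_bytes_approaching()
+ // below now re-runs check_quota_condition() itself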
+ [&new_bytes](const Inode &in) {
return in.quota.max_bytes && (in.rstat.rbytes + new_bytes)
> in.quota.max_bytes;
});
}
-bool Client::is_quota_bytes_approaching(Inode *in, std::list<InodeRef>& quota_roots)
+bool Client::is_quota_bytes_approaching(Inode *in, const UserPerm& perms)
{
- assert(in->size >= in->reported_size);
+ ceph_assert(in->size >= in->reported_size);
const uint64_t size = in->size - in->reported_size;
-
- for (auto& diri : quota_roots) {
- if (diri->quota.max_bytes) {
- if (diri->rstat.rbytes >= diri->quota.max_bytes)
- return true;
-
- uint64_t space = diri->quota.max_bytes - diri->rstat.rbytes;
- if ((space >> 4) < size)
- return true;
- }
- }
- return false;
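+ // "approaching" now means the not-yet-reported size growth exceeds
+ // 1/16 of the remaining headroom under a quota root's byte limit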
+ return check_quota_condition(in, perms,
+ [&size](const Inode &in) {
+ if (in.quota.max_bytes) {
+ if (in.rstat.rbytes >= in.quota.max_bytes) {
+ return true;
+ }
+
+ const uint64_t space = in.quota.max_bytes - in.rstat.rbytes;
+ return (space >> 4) < size;
+ } else {
+ return false;
+ }
+ });
}
enum {
if (!cct->_conf->client_check_pool_perm)
return 0;
+ /* Only need to do this for regular files */
+ if (!in->is_file())
+ return 0;
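+ // (only regular files carry data objects in RADOS pools; directories
+ // and symlinks have nothing to authorize against)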
+
int64_t pool_id = in->layout.pool_id;
std::string pool_ns = in->layout.pool_ns;
std::pair<int64_t, std::string> perm_key(pool_id, pool_ns);
wait_on_list(waiting_for_pool_perm);
} else {
have = it->second;
- assert(have & POOL_CHECKED);
+ ceph_assert(have & POOL_CHECKED);
break;
}
}
objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), wr_op,
nullsnapc, ceph::real_clock::now(), 0, &wr_cond);
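+ // drop client_lock while blocking on the read/write probe ops; holding
+ // it across the waits would stall every other client thread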
- client_lock.Unlock();
+ client_lock.unlock();
int rd_ret = rd_cond.wait();
int wr_ret = wr_cond.wait();
- client_lock.Lock();
+ client_lock.lock();
bool errored = false;
if (rd_ret == 0 || rd_ret == -ENOENT)
have |= POOL_READ;
else if (rd_ret != -EPERM) {
- ldout(cct, 10) << "check_pool_perm on pool " << pool_id << " ns " << pool_ns
+ ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " rd_err = " << rd_ret << " wr_err = " << wr_ret << dendl;
errored = true;
}
if (wr_ret == 0 || wr_ret == -EEXIST)
have |= POOL_WRITE;
else if (wr_ret != -EPERM) {
- ldout(cct, 10) << "check_pool_perm on pool " << pool_id << " ns " << pool_ns
+ ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " rd_err = " << rd_ret << " wr_err = " << wr_ret << dendl;
errored = true;
}
}
if ((need & CEPH_CAP_FILE_RD) && !(have & POOL_READ)) {
- ldout(cct, 10) << "check_pool_perm on pool " << pool_id << " ns " << pool_ns
+ ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " need " << ccap_string(need) << ", but no read perm" << dendl;
return -EPERM;
}
if ((need & CEPH_CAP_FILE_WR) && !(have & POOL_WRITE)) {
- ldout(cct, 10) << "check_pool_perm on pool " << pool_id << " ns " << pool_ns
+ ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns
<< " need " << ccap_string(need) << ", but no write perm" << dendl;
return -EPERM;
}
r = xattrs.size();
if (r > 0)
- ::encode(xattrs, xattrs_bl);
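+ // the global ::encode wrappers are gone; the namespaced encode() is
+ // found via argument-dependent lookup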
+ encode(xattrs, xattrs_bl);
} else {
if (umask_cb)
*mode &= ~umask_cb(callback_handle);
void Client::set_filer_flags(int flags)
{
- Mutex::Locker l(client_lock);
- assert(flags == 0 ||
+ std::lock_guard l(client_lock);
+ ceph_assert(flags == 0 ||
flags == CEPH_OSD_FLAG_LOCALIZE_READS);
objecter->add_global_op_flags(flags);
}
void Client::clear_filer_flags(int flags)
{
- Mutex::Locker l(client_lock);
- assert(flags == CEPH_OSD_FLAG_LOCALIZE_READS);
+ std::lock_guard l(client_lock);
+ ceph_assert(flags == CEPH_OSD_FLAG_LOCALIZE_READS);
objecter->clear_global_op_flag(flags);
}
+// called before mount
+void Client::set_uuid(const std::string& uuid)
+{
+ std::lock_guard l(client_lock);
+ ceph_assert(initialized);
+ ceph_assert(!uuid.empty());
+
+ metadata["uuid"] = uuid;
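+ // drop any sessions opened under the previous identity so the new uuid
+ // is presented when sessions are (re)opened at mount time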
+ _close_sessions();
+}
+
+// called before mount. 0 means infinite
+void Client::set_session_timeout(unsigned timeout)
+{
+ std::lock_guard l(client_lock);
+ ceph_assert(initialized);
+
+ metadata["timeout"] = stringify(timeout);
+}
+
+// called before mount
+int Client::start_reclaim(const std::string& uuid, unsigned flags,
+ const std::string& fs_name)
+{
+ std::lock_guard l(client_lock);
+ if (!initialized)
+ return -ENOTCONN;
+
+ if (uuid.empty())
+ return -EINVAL;
+
+ {
+ auto it = metadata.find("uuid");
+ if (it != metadata.end() && it->second == uuid)
+ return -EINVAL;
+ }
+
+ int r = subscribe_mdsmap(fs_name);
+ if (r < 0) {
+ lderr(cct) << "mdsmap subscription failed: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ if (metadata.empty())
+ populate_metadata("");
+
+ while (mdsmap->get_epoch() == 0)
+ wait_on_list(waiting_for_mdsmap);
+
+ reclaim_errno = 0;
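+ // walk every in-MDS rank; the rank only advances once its reclaim state
+ // is resolved, otherwise wait and re-check the same rank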
+ for (unsigned mds = 0; mds < mdsmap->get_num_in_mds(); ) {
+ if (!mdsmap->is_up(mds)) {
+ ldout(cct, 10) << "mds." << mds << " not active, waiting for new mdsmap" << dendl;
+ wait_on_list(waiting_for_mdsmap);
+ continue;
+ }
+
+ MetaSession *session;
+ if (!have_open_session(mds)) {
+ session = _get_or_open_mds_session(mds);
+ if (session->state == MetaSession::STATE_REJECTED)
+ return -EPERM;
+ if (session->state != MetaSession::STATE_OPENING) {
+ // umounting?
+ return -EINVAL;
+ }
+ ldout(cct, 10) << "waiting for session to mds." << mds << " to open" << dendl;
+ wait_on_context_list(session->waiting_for_open);
+ continue;
+ }
+
+ session = &mds_sessions.at(mds);
+ if (!session->mds_features.test(CEPHFS_FEATURE_RECLAIM_CLIENT))
+ return -EOPNOTSUPP;
+
+ if (session->reclaim_state == MetaSession::RECLAIM_NULL ||
+ session->reclaim_state == MetaSession::RECLAIMING) {
+ session->reclaim_state = MetaSession::RECLAIMING;
+ auto m = make_message<MClientReclaim>(uuid, flags);
+ session->con->send_message2(std::move(m));
+ wait_on_list(waiting_for_reclaim);
+ } else if (session->reclaim_state == MetaSession::RECLAIM_FAIL) {
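+ // GNU "x ?: y" shorthand: return reclaim_errno if non-zero, else
+ // -ENOTRECOVERABLE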
+ return reclaim_errno ? : -ENOTRECOVERABLE;
+ } else {
+ mds++;
+ }
+ }
+
+ // didn't find target session in any mds
+ if (reclaim_target_addrs.empty()) {
+ if (flags & CEPH_RECLAIM_RESET)
+ return -ENOENT;
+ return -ENOTRECOVERABLE;
+ }
+
+ if (flags & CEPH_RECLAIM_RESET)
+ return 0;
+
+ // use blacklist to check if target session was killed
+ // (config option mds_session_blacklist_on_evict needs to be true)
+ C_SaferCond cond;
+ if (!objecter->wait_for_map(reclaim_osd_epoch, &cond)) {
+ ldout(cct, 10) << __func__ << ": waiting for OSD epoch " << reclaim_osd_epoch << dendl;
+ client_lock.unlock();
+ cond.wait();
+ client_lock.lock();
+ }
+
+ bool blacklisted = objecter->with_osdmap(
+ [this](const OSDMap &osd_map) -> bool {
+ return osd_map.is_blacklisted(reclaim_target_addrs);
+ });
+ if (blacklisted)
+ return -ENOTRECOVERABLE;
+
+ metadata["reclaiming_uuid"] = uuid;
+ return 0;
+}
+
+void Client::finish_reclaim()
+{
+ auto it = metadata.find("reclaiming_uuid");
+ if (it == metadata.end()) {
+ for (auto &p : mds_sessions)
+ p.second.reclaim_state = MetaSession::RECLAIM_NULL;
+ return;
+ }
+
+ for (auto &p : mds_sessions) {
+ p.second.reclaim_state = MetaSession::RECLAIM_NULL;
+ auto m = make_message<MClientReclaim>("", MClientReclaim::FLAG_FINISH);
+ p.second.con->send_message2(std::move(m));
+ }
+
+ metadata["uuid"] = it->second;
+ metadata.erase(it);
+}
+
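+// Rough usage sketch (hypothetical caller, error handling elided): a new
+// client instance taking over from a crashed predecessor, e.g. a restarted
+// NFS gateway, might do:
+//
+//   client->set_uuid(my_uuid);                 // identify this instance
+//   if (client->start_reclaim(dead_uuid, 0, fs_name) == 0)
+//     client->finish_reclaim();                // adopt dead_uuid's identity
+//   client->mount(...);
+//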
+void Client::handle_client_reclaim_reply(const MConstRef<MClientReclaimReply>& reply)
+{
+ mds_rank_t from = mds_rank_t(reply->get_source().num());
+ ldout(cct, 10) << __func__ << " " << *reply << " from mds." << from << dendl;
+
+ MetaSession *session = _get_mds_session(from, reply->get_connection().get());
+ if (!session) {
+ ldout(cct, 10) << " discarding reclaim reply from sessionless mds." << from << dendl;
+ return;
+ }
+
+ if (reply->get_result() >= 0) {
+ session->reclaim_state = MetaSession::RECLAIM_OK;
+ if (reply->get_epoch() > reclaim_osd_epoch)
+ reclaim_osd_epoch = reply->get_epoch();
+ if (!reply->get_addrs().empty())
+ reclaim_target_addrs = reply->get_addrs();
+ } else {
+ session->reclaim_state = MetaSession::RECLAIM_FAIL;
+ reclaim_errno = reply->get_result();
+ }
+
+ signal_cond_list(waiting_for_reclaim);
+}
+
/**
* This is included in cap release messages, to cause
* the MDS to wait until this OSD map epoch. It is necessary
return keys;
}
-void Client::handle_conf_change(const struct md_config_t *conf,
+void Client::handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed)
{
- Mutex::Locker lock(client_lock);
+ std::lock_guard lock(client_lock);
if (changed.count("client_cache_mid")) {
lru.lru_set_midpoint(cct->_conf->client_cache_mid);
mds_rank_t Client::_get_random_up_mds() const
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
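+ // ceph::mutex has no is_locked_by_me() member; the check moved to the
+ // ceph_mutex_is_locked_by_me() helper, which compiles to true when
+ // mutex debugging is disabled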
std::set<mds_rank_t> up;
mdsmap->get_up_mds_set(up);
StandaloneClient::StandaloneClient(Messenger *m, MonClient *mc)
- : Client(m, mc, new Objecter(m->cct, m, mc, NULL, 0, 0))
+ : Client(m, mc, new Objecter(m->cct, m, mc, nullptr))
{
monclient->set_messenger(m);
objecter->set_client_incarnation(0);
int StandaloneClient::init()
{
- timer.init();
- objectcacher->start();
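+ // timer and objectcacher startup moved into the shared _pre_init() helper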
+ _pre_init();
objecter->init();
- client_lock.Lock();
- assert(!initialized);
+ client_lock.lock();
+ ceph_assert(!is_initialized());
messenger->add_dispatcher_tail(objecter);
messenger->add_dispatcher_tail(this);
if (r < 0) {
// need to do cleanup because we're in an intermediate init state
timer.shutdown();
- client_lock.Unlock();
+ client_lock.unlock();
objecter->shutdown();
objectcacher->stop();
monclient->shutdown();
}
objecter->start();
- client_lock.Unlock();
+ client_lock.unlock();
_finish_init();
return 0;