fuse_default_permissions = cct->_conf.get_val<bool>(
"fuse_default_permissions");
+ _collect_and_send_global_metrics = cct->_conf.get_val<bool>(
+ "client_collect_and_send_global_metrics");
+
if (cct->_conf->client_acl_type == "posix_acl")
acl_type = POSIX_ACL;
objecter_finisher.start();
filer.reset(new Filer(objecter, &objecter_finisher));
- objecter->enable_blocklist_events();
objectcacher->start();
}
break;
}
session->mds_features = std::move(m->supported_features);
+ session->mds_metric_flags = std::move(m->metric_spec.metric_flags);
renew_caps(session.get());
session->state = MetaSession::STATE_OPEN;
MetaRequest *request = mds_requests[tid];
ceph_assert(request);
+ /*
+ * The type of 'num_fwd' in ceph 'MClientRequestForward'
+ * is 'int32_t', while in 'ceph_mds_request_head' the
+ * type is '__u8'. So in case the request bounces between
+ * MDSes exceeding 256 times, the client will get stuck.
+ *
+ * In this case it's usually a bug in the MDS and continuing
+ * to bounce the request makes no sense.
+ *
+ * In future this could be fixed in ceph code, so avoid
+ * hardcoding the limit here.
+ */
+ int max_fwd = sizeof(((struct ceph_mds_request_head*)0)->num_fwd);
+ /* Parenthesize the shift: '-' binds tighter than '<<', so the
+  * unparenthesized form would yield 1 << 7 == 128 instead of the
+  * intended __u8 maximum (1 << 8) - 1 == 255. */
+ max_fwd = (1 << (max_fwd * CHAR_BIT)) - 1;
+ auto num_fwd = fwd->get_num_fwd();
+ if (num_fwd <= request->num_fwd || num_fwd >= max_fwd) {
+ if (request->num_fwd >= max_fwd || num_fwd >= max_fwd) {
+ request->abort(-EMULTIHOP);
+ request->caller_cond->notify_all();
+ ldout(cct, 1) << __func__ << " tid " << tid << " seq overflow"
+ << ", abort it" << dendl;
+ } else {
+ ldout(cct, 10) << __func__ << " tid " << tid
+ << " old fwd seq " << fwd->get_num_fwd()
+ << " <= req fwd " << request->num_fwd
+ << ", ignore it" << dendl;
+ }
+ return;
+ }
+
// reset retry counter
request->retry_attempt = 0;
request->mds = -1;
request->item.remove_myself();
- request->num_fwd = fwd->get_num_fwd();
+ request->num_fwd = num_fwd;
request->resend_mds = fwd->get_dest_mds();
request->caller_cond->notify_all();
}
void Client::handle_osd_map(const MConstRef<MOSDMap>& m)
{
- std::set<entity_addr_t> new_blocklists;
-
std::scoped_lock cl(client_lock);
- objecter->consume_blocklist_events(&new_blocklists);
const auto myaddrs = messenger->get_myaddrs();
- bool new_blocklist = false;
- bool prenautilus = objecter->with_osdmap(
+ bool new_blocklist = objecter->with_osdmap(
[&](const OSDMap& o) {
- return o.require_osd_release < ceph_release_t::nautilus;
+ return o.is_blocklisted(myaddrs);
});
- if (!blocklisted) {
- for (auto a : myaddrs.v) {
- // blocklist entries are always TYPE_ANY for nautilus+
- a.set_type(entity_addr_t::TYPE_ANY);
- if (new_blocklists.count(a)) {
- new_blocklist = true;
- break;
- }
- if (prenautilus) {
- // ...except pre-nautilus, they were TYPE_LEGACY
- a.set_type(entity_addr_t::TYPE_LEGACY);
- if (new_blocklists.count(a)) {
- new_blocklist = true;
- break;
- }
- }
- }
- }
- if (new_blocklist) {
+
+ if (new_blocklist && !blocklisted) {
auto epoch = objecter->with_osdmap([](const OSDMap &o){
return o.get_epoch();
});
std::vector<ClientMetricMessage> message;
// read latency
- metric = ClientMetricMessage(ReadLatencyPayload(logger->tget(l_c_read)));
- message.push_back(metric);
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_READ_LATENCY)) {
+ metric = ClientMetricMessage(ReadLatencyPayload(logger->tget(l_c_read)));
+ message.push_back(metric);
+ }
// write latency
- metric = ClientMetricMessage(WriteLatencyPayload(logger->tget(l_c_wrlat)));
- message.push_back(metric);
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_WRITE_LATENCY)) {
+ metric = ClientMetricMessage(WriteLatencyPayload(logger->tget(l_c_wrlat)));
+ message.push_back(metric);
+ }
// metadata latency
- metric = ClientMetricMessage(MetadataLatencyPayload(logger->tget(l_c_lat)));
- message.push_back(metric);
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_METADATA_LATENCY)) {
+ metric = ClientMetricMessage(MetadataLatencyPayload(logger->tget(l_c_lat)));
+ message.push_back(metric);
+ }
// cap hit ratio -- nr_caps is unused right now
- auto [cap_hits, cap_misses] = get_cap_hit_rates();
- metric = ClientMetricMessage(CapInfoPayload(cap_hits, cap_misses, 0));
- message.push_back(metric);
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_CAP_INFO)) {
+ auto [cap_hits, cap_misses] = get_cap_hit_rates();
+ metric = ClientMetricMessage(CapInfoPayload(cap_hits, cap_misses, 0));
+ message.push_back(metric);
+ }
// dentry lease hit ratio
- auto [dlease_hits, dlease_misses, nr] = get_dlease_hit_rates();
- metric = ClientMetricMessage(DentryLeasePayload(dlease_hits, dlease_misses, nr));
- message.push_back(metric);
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_DENTRY_LEASE)) {
+ auto [dlease_hits, dlease_misses, nr] = get_dlease_hit_rates();
+ metric = ClientMetricMessage(DentryLeasePayload(dlease_hits, dlease_misses, nr));
+ message.push_back(metric);
+ }
// opened files
- {
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_OPENED_FILES)) {
auto [opened_files, total_inodes] = get_opened_files_rates();
metric = ClientMetricMessage(OpenedFilesPayload(opened_files, total_inodes));
+ message.push_back(metric);
}
- message.push_back(metric);
// pinned i_caps
- {
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_PINNED_ICAPS)) {
auto [pinned_icaps, total_inodes] = get_pinned_icaps_rates();
metric = ClientMetricMessage(PinnedIcapsPayload(pinned_icaps, total_inodes));
+ message.push_back(metric);
}
- message.push_back(metric);
// opened inodes
- {
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_OPENED_INODES)) {
auto [opened_inodes, total_inodes] = get_opened_inodes_rates();
metric = ClientMetricMessage(OpenedInodesPayload(opened_inodes, total_inodes));
+ message.push_back(metric);
}
- message.push_back(metric);
// read io sizes
- metric = ClientMetricMessage(ReadIoSizesPayload(total_read_ops,
- total_read_size));
- message.push_back(metric);
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_READ_IO_SIZES)) {
+ metric = ClientMetricMessage(ReadIoSizesPayload(total_read_ops,
+ total_read_size));
+ message.push_back(metric);
+ }
// write io sizes
- metric = ClientMetricMessage(WriteIoSizesPayload(total_write_ops,
- total_write_size));
- message.push_back(metric);
+ if (_collect_and_send_global_metrics ||
+ session->mds_metric_flags.test(CLIENT_METRIC_TYPE_WRITE_IO_SIZES)) {
+ metric = ClientMetricMessage(WriteIoSizesPayload(total_write_ops,
+ total_write_size));
+ message.push_back(metric);
+ }
session->con->send_message2(make_message<MClientMetrics>(std::move(message)));
}
if (changed.count("client_oc_max_dirty_age")) {
objectcacher->set_max_dirty_age(cct->_conf->client_oc_max_dirty_age);
}
+ if (changed.count("client_collect_and_send_global_metrics")) {
+ _collect_and_send_global_metrics = cct->_conf.get_val<bool>(
+ "client_collect_and_send_global_metrics");
+ }
}
void intrusive_ptr_add_ref(Inode *in)