]> git.proxmox.com Git - ceph.git/blob - ceph/src/mds/SessionMap.h
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / mds / SessionMap.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #ifndef CEPH_MDS_SESSIONMAP_H
16 #define CEPH_MDS_SESSIONMAP_H
17
18 #include <set>
19
20 #include "include/unordered_map.h"
21
22 #include "include/Context.h"
23 #include "include/xlist.h"
24 #include "include/elist.h"
25 #include "include/interval_set.h"
26 #include "mdstypes.h"
27 #include "mds/MDSAuthCaps.h"
28 #include "common/perf_counters.h"
29 #include "common/DecayCounter.h"
30
31 #include "CInode.h"
32 #include "Capability.h"
33 #include "MDSContext.h"
34 #include "msg/Message.h"
35
36 struct MDRequestImpl;
37
// Performance counter indices for the MDS SessionMap subsystem.
// The [l_mdssm_first, l_mdssm_last) range is registered as a
// PerfCounters block; SessionMap bookkeeping sets/increments the
// individual entries (e.g. session_count is set to the map size and
// session_add incremented when a new session is created).
enum {
  l_mdssm_first = 5500,
  l_mdssm_session_count,
  l_mdssm_session_add,
  l_mdssm_session_remove,
  l_mdssm_session_open,
  l_mdssm_session_stale,
  l_mdssm_total_load,
  l_mdssm_avg_load,
  l_mdssm_avg_session_uptime,
  l_mdssm_last,
};
50
51 class CInode;
52
53 /*
54 * session
55 */
56
57 class Session : public RefCountedObject {
58 // -- state etc --
59 public:
60 /*
61
62 <deleted> <-- closed <------------+
63 ^ | |
64 | v |
65 killing <-- opening <----+ |
66 ^ | | |
67 | v | |
68 stale <--> open --> closing ---+
69
70 + additional dimension of 'importing' (with counter)
71
72 */
73
74 using clock = ceph::coarse_mono_clock;
75 using time = ceph::coarse_mono_time;
76
77 enum {
78 STATE_CLOSED = 0,
79 STATE_OPENING = 1, // journaling open
80 STATE_OPEN = 2,
81 STATE_CLOSING = 3, // journaling close
82 STATE_STALE = 4,
83 STATE_KILLING = 5
84 };
85
86 Session() = delete;
87 Session(ConnectionRef con) :
88 item_session_list(this),
89 requests(member_offset(MDRequestImpl, item_session_request)),
90 recall_caps(g_conf().get_val<double>("mds_recall_warning_decay_rate")),
91 release_caps(g_conf().get_val<double>("mds_recall_warning_decay_rate")),
92 recall_caps_throttle(g_conf().get_val<double>("mds_recall_max_decay_rate")),
93 recall_caps_throttle2o(0.5),
94 session_cache_liveness(g_conf().get_val<double>("mds_session_cache_liveness_decay_rate")),
95 cap_acquisition(g_conf().get_val<double>("mds_session_cap_acquisition_decay_rate")),
96 birth_time(clock::now())
97 {
98 set_connection(std::move(con));
99 }
100 ~Session() override {
101 ceph_assert(!item_session_list.is_on_list());
102 preopen_out_queue.clear();
103 }
104
105 static std::string_view get_state_name(int s) {
106 switch (s) {
107 case STATE_CLOSED: return "closed";
108 case STATE_OPENING: return "opening";
109 case STATE_OPEN: return "open";
110 case STATE_CLOSING: return "closing";
111 case STATE_STALE: return "stale";
112 case STATE_KILLING: return "killing";
113 default: return "???";
114 }
115 }
116
117 void dump(ceph::Formatter *f, bool cap_dump=false) const;
118 void push_pv(version_t pv)
119 {
120 ceph_assert(projected.empty() || projected.back() != pv);
121 projected.push_back(pv);
122 }
123
124 void pop_pv(version_t v)
125 {
126 ceph_assert(!projected.empty());
127 ceph_assert(projected.front() == v);
128 projected.pop_front();
129 }
130
131 int get_state() const { return state; }
132 void set_state(int new_state)
133 {
134 if (state != new_state) {
135 state = new_state;
136 state_seq++;
137 }
138 }
139
140 void set_reconnecting(bool s) { reconnecting = s; }
141
142 void decode(ceph::buffer::list::const_iterator &p);
143 template<typename T>
144 void set_client_metadata(T&& meta)
145 {
146 info.client_metadata = std::forward<T>(meta);
147 _update_human_name();
148 }
149
150 const std::string& get_human_name() const {return human_name;}
151
152 size_t get_request_count() const;
153
154 void notify_cap_release(size_t n_caps);
155 uint64_t notify_recall_sent(size_t new_limit);
156 auto get_recall_caps_throttle() const {
157 return recall_caps_throttle.get();
158 }
159 auto get_recall_caps_throttle2o() const {
160 return recall_caps_throttle2o.get();
161 }
162 auto get_recall_caps() const {
163 return recall_caps.get();
164 }
165 auto get_release_caps() const {
166 return release_caps.get();
167 }
168 auto get_session_cache_liveness() const {
169 return session_cache_liveness.get();
170 }
171 auto get_cap_acquisition() const {
172 return cap_acquisition.get();
173 }
174
175 inodeno_t take_ino(inodeno_t ino = 0) {
176 if (ino) {
177 if (!info.prealloc_inos.contains(ino))
178 return 0;
179 if (delegated_inos.contains(ino)) {
180 delegated_inos.erase(ino);
181 } else if (free_prealloc_inos.contains(ino)) {
182 free_prealloc_inos.erase(ino);
183 } else {
184 ceph_assert(0);
185 }
186 } else if (!free_prealloc_inos.empty()) {
187 ino = free_prealloc_inos.range_start();
188 free_prealloc_inos.erase(ino);
189 }
190 return ino;
191 }
192
193 void delegate_inos(int want, interval_set<inodeno_t>& inos) {
194 want -= (int)delegated_inos.size();
195 if (want <= 0)
196 return;
197
198 for (auto it = free_prealloc_inos.begin(); it != free_prealloc_inos.end(); ) {
199 if (want < (int)it.get_len()) {
200 inos.insert(it.get_start(), (inodeno_t)want);
201 delegated_inos.insert(it.get_start(), (inodeno_t)want);
202 free_prealloc_inos.erase(it.get_start(), (inodeno_t)want);
203 break;
204 }
205 want -= (int)it.get_len();
206 inos.insert(it.get_start(), it.get_len());
207 delegated_inos.insert(it.get_start(), it.get_len());
208 free_prealloc_inos.erase(it++);
209 if (want <= 0)
210 break;
211 }
212 }
213
214 // sans any delegated ones
215 int get_num_prealloc_inos() const {
216 return free_prealloc_inos.size();
217 }
218
219 int get_num_projected_prealloc_inos() const {
220 return get_num_prealloc_inos() + pending_prealloc_inos.size();
221 }
222
223 client_t get_client() const {
224 return info.get_client();
225 }
226
227 std::string_view get_state_name() const { return get_state_name(state); }
228 uint64_t get_state_seq() const { return state_seq; }
229 bool is_closed() const { return state == STATE_CLOSED; }
230 bool is_opening() const { return state == STATE_OPENING; }
231 bool is_open() const { return state == STATE_OPEN; }
232 bool is_closing() const { return state == STATE_CLOSING; }
233 bool is_stale() const { return state == STATE_STALE; }
234 bool is_killing() const { return state == STATE_KILLING; }
235
236 void inc_importing() {
237 ++importing_count;
238 }
239 void dec_importing() {
240 ceph_assert(importing_count > 0);
241 --importing_count;
242 }
243 bool is_importing() const { return importing_count > 0; }
244
245 void set_load_avg_decay_rate(double rate) {
246 ceph_assert(is_open() || is_stale());
247 load_avg = DecayCounter(rate);
248 }
249 uint64_t get_load_avg() const {
250 return (uint64_t)load_avg.get();
251 }
252 void hit_session() {
253 load_avg.adjust();
254 }
255
256 double get_session_uptime() const {
257 std::chrono::duration<double> uptime = clock::now() - birth_time;
258 return uptime.count();
259 }
260
261 time get_birth_time() const {
262 return birth_time;
263 }
264
265 void inc_cap_gen() { ++cap_gen; }
266 uint32_t get_cap_gen() const { return cap_gen; }
267
268 version_t inc_push_seq() { return ++cap_push_seq; }
269 version_t get_push_seq() const { return cap_push_seq; }
270
271 version_t wait_for_flush(MDSContext* c) {
272 waitfor_flush[get_push_seq()].push_back(c);
273 return get_push_seq();
274 }
275 void finish_flush(version_t seq, MDSContext::vec& ls) {
276 while (!waitfor_flush.empty()) {
277 auto it = waitfor_flush.begin();
278 if (it->first > seq)
279 break;
280 auto& v = it->second;
281 ls.insert(ls.end(), v.begin(), v.end());
282 waitfor_flush.erase(it);
283 }
284 }
285
286 void touch_readdir_cap(uint32_t count) {
287 cap_acquisition.hit(count);
288 }
289
290 void touch_cap(Capability *cap) {
291 session_cache_liveness.hit(1.0);
292 caps.push_front(&cap->item_session_caps);
293 }
294
295 void touch_cap_bottom(Capability *cap) {
296 session_cache_liveness.hit(1.0);
297 caps.push_back(&cap->item_session_caps);
298 }
299
300 void touch_lease(ClientLease *r) {
301 session_cache_liveness.hit(1.0);
302 leases.push_back(&r->item_session_lease);
303 }
304
305 bool is_any_flush_waiter() {
306 return !waitfor_flush.empty();
307 }
308
309 void add_completed_request(ceph_tid_t t, inodeno_t created) {
310 info.completed_requests[t] = created;
311 completed_requests_dirty = true;
312 }
313 bool trim_completed_requests(ceph_tid_t mintid) {
314 // trim
315 bool erased_any = false;
316 while (!info.completed_requests.empty() &&
317 (mintid == 0 || info.completed_requests.begin()->first < mintid)) {
318 info.completed_requests.erase(info.completed_requests.begin());
319 erased_any = true;
320 }
321
322 if (erased_any) {
323 completed_requests_dirty = true;
324 }
325 return erased_any;
326 }
327 bool have_completed_request(ceph_tid_t tid, inodeno_t *pcreated) const {
328 auto p = info.completed_requests.find(tid);
329 if (p == info.completed_requests.end())
330 return false;
331 if (pcreated)
332 *pcreated = p->second;
333 return true;
334 }
335
336 void add_completed_flush(ceph_tid_t tid) {
337 info.completed_flushes.insert(tid);
338 }
339 bool trim_completed_flushes(ceph_tid_t mintid) {
340 bool erased_any = false;
341 while (!info.completed_flushes.empty() &&
342 (mintid == 0 || *info.completed_flushes.begin() < mintid)) {
343 info.completed_flushes.erase(info.completed_flushes.begin());
344 erased_any = true;
345 }
346 if (erased_any) {
347 completed_requests_dirty = true;
348 }
349 return erased_any;
350 }
351 bool have_completed_flush(ceph_tid_t tid) const {
352 return info.completed_flushes.count(tid);
353 }
354
355 uint64_t get_num_caps() const {
356 return caps.size();
357 }
358
359 unsigned get_num_completed_flushes() const { return info.completed_flushes.size(); }
360 unsigned get_num_trim_flushes_warnings() const {
361 return num_trim_flushes_warnings;
362 }
363 void inc_num_trim_flushes_warnings() { ++num_trim_flushes_warnings; }
364 void reset_num_trim_flushes_warnings() { num_trim_flushes_warnings = 0; }
365
366 unsigned get_num_completed_requests() const { return info.completed_requests.size(); }
367 unsigned get_num_trim_requests_warnings() const {
368 return num_trim_requests_warnings;
369 }
370 void inc_num_trim_requests_warnings() { ++num_trim_requests_warnings; }
371 void reset_num_trim_requests_warnings() { num_trim_requests_warnings = 0; }
372
373 bool has_dirty_completed_requests() const
374 {
375 return completed_requests_dirty;
376 }
377
378 void clear_dirty_completed_requests()
379 {
380 completed_requests_dirty = false;
381 }
382
383 int check_access(CInode *in, unsigned mask, int caller_uid, int caller_gid,
384 const std::vector<uint64_t> *gid_list, int new_uid, int new_gid);
385
386 bool fs_name_capable(std::string_view fs_name, unsigned mask) const {
387 return auth_caps.fs_name_capable(fs_name, mask);
388 }
389
390 void set_connection(ConnectionRef con) {
391 connection = std::move(con);
392 auto& c = connection;
393 if (c) {
394 info.auth_name = c->get_peer_entity_name();
395 info.inst.addr = c->get_peer_socket_addr();
396 info.inst.name = entity_name_t(c->get_peer_type(), c->get_peer_global_id());
397 }
398 }
399 const ConnectionRef& get_connection() const {
400 return connection;
401 }
402
403 void clear() {
404 pending_prealloc_inos.clear();
405 free_prealloc_inos.clear();
406 delegated_inos.clear();
407 info.clear_meta();
408
409 cap_push_seq = 0;
410 last_cap_renew = clock::zero();
411 }
412
413 Session *reclaiming_from = nullptr;
414 session_info_t info; ///< durable bits
415 MDSAuthCaps auth_caps;
416
417 xlist<Session*>::item item_session_list;
418
419 std::list<ceph::ref_t<Message>> preopen_out_queue; ///< messages for client, queued before they connect
420
421 /* This is mutable to allow get_request_count to be const. elist does not
422 * support const iterators yet.
423 */
424 mutable elist<MDRequestImpl*> requests;
425
426 interval_set<inodeno_t> pending_prealloc_inos; // journaling prealloc, will be added to prealloc_inos
427 interval_set<inodeno_t> free_prealloc_inos; //
428 interval_set<inodeno_t> delegated_inos; // hand these out to client
429
430 xlist<Capability*> caps; // inodes with caps; front=most recently used
431 xlist<ClientLease*> leases; // metadata leases to clients
432 time last_cap_renew = clock::zero();
433 time last_seen = clock::zero();
434
435 // -- leases --
436 uint32_t lease_seq = 0;
437
438 protected:
439 ConnectionRef connection;
440
441 private:
442 friend class SessionMap;
443
444 // Human (friendly) name is soft state generated from client metadata
445 void _update_human_name();
446
447 int state = STATE_CLOSED;
448 bool reconnecting = false;
449 uint64_t state_seq = 0;
450 int importing_count = 0;
451
452 std::string human_name;
453
454 // Versions in this session was projected: used to verify
455 // that appropriate mark_dirty calls follow.
456 std::deque<version_t> projected;
457
458 // request load average for this session
459 DecayCounter load_avg;
460
461 // Ephemeral state for tracking progress of capability recalls
462 // caps being recalled recently by this session; used for Beacon warnings
463 DecayCounter recall_caps; // caps that have been released
464 DecayCounter release_caps;
465 // throttle on caps recalled
466 DecayCounter recall_caps_throttle;
467 // second order throttle that prevents recalling too quickly
468 DecayCounter recall_caps_throttle2o;
469 // New limit in SESSION_RECALL
470 uint32_t recall_limit = 0;
471
472 // session caps liveness
473 DecayCounter session_cache_liveness;
474
475 // cap acquisition via readdir
476 DecayCounter cap_acquisition;
477
478 // session start time -- used to track average session time
479 // note that this is initialized in the constructor rather
480 // than at the time of adding a session to the sessionmap
481 // as journal replay of sessionmap will not call add_session().
482 time birth_time;
483
484 // -- caps --
485 uint32_t cap_gen = 0;
486 version_t cap_push_seq = 0; // cap push seq #
487 std::map<version_t, MDSContext::vec > waitfor_flush; // flush session messages
488
489 // Has completed_requests been modified since the last time we
490 // wrote this session out?
491 bool completed_requests_dirty = false;
492
493 unsigned num_trim_flushes_warnings = 0;
494 unsigned num_trim_requests_warnings = 0;
495 };
496
497 class SessionFilter
498 {
499 public:
500 SessionFilter() : reconnecting(false, false) {}
501
502 bool match(
503 const Session &session,
504 std::function<bool(client_t)> is_reconnecting) const;
505 int parse(const std::vector<std::string> &args, std::ostream *ss);
506 void set_reconnecting(bool v)
507 {
508 reconnecting.first = true;
509 reconnecting.second = v;
510 }
511
512 std::map<std::string, std::string> metadata;
513 std::string auth_name;
514 std::string state;
515 int64_t id = 0;
516 protected:
517 // First is whether to filter, second is filter value
518 std::pair<bool, bool> reconnecting;
519 };
520
521 /*
522 * session map
523 */
524
525 class MDSRank;
526
527 /**
528 * Encapsulate the serialized state associated with SessionMap. Allows
529 * encode/decode outside of live MDS instance.
530 */
531 class SessionMapStore {
532 public:
533 using clock = Session::clock;
534 using time = Session::time;
535
536 SessionMapStore(): total_load_avg(decay_rate) {}
537 virtual ~SessionMapStore() {};
538
539 version_t get_version() const {return version;}
540
541 virtual void encode_header(ceph::buffer::list *header_bl);
542 virtual void decode_header(ceph::buffer::list &header_bl);
543 virtual void decode_values(std::map<std::string, ceph::buffer::list> &session_vals);
544 virtual void decode_legacy(ceph::buffer::list::const_iterator& blp);
545 void dump(ceph::Formatter *f) const;
546
547 void set_rank(mds_rank_t r)
548 {
549 rank = r;
550 }
551
552 Session* get_or_add_session(const entity_inst_t& i) {
553 Session *s;
554 auto session_map_entry = session_map.find(i.name);
555 if (session_map_entry != session_map.end()) {
556 s = session_map_entry->second;
557 } else {
558 s = session_map[i.name] = new Session(ConnectionRef());
559 s->info.inst = i;
560 s->last_cap_renew = Session::clock::now();
561 if (logger) {
562 logger->set(l_mdssm_session_count, session_map.size());
563 logger->inc(l_mdssm_session_add);
564 }
565 }
566
567 return s;
568 }
569
570 static void generate_test_instances(std::list<SessionMapStore*>& ls);
571
572 void reset_state()
573 {
574 session_map.clear();
575 }
576
577 mds_rank_t rank = MDS_RANK_NONE;
578
579 protected:
580 version_t version = 0;
581 ceph::unordered_map<entity_name_t, Session*> session_map;
582 PerfCounters *logger =nullptr;
583
584 // total request load avg
585 double decay_rate = g_conf().get_val<double>("mds_request_load_average_decay_rate");
586 DecayCounter total_load_avg;
587 };
588
589 class SessionMap : public SessionMapStore {
590 public:
591 SessionMap() = delete;
592 explicit SessionMap(MDSRank *m) : mds(m) {}
593
594 ~SessionMap() override
595 {
596 for (auto p : by_state)
597 delete p.second;
598
599 if (logger) {
600 g_ceph_context->get_perfcounters_collection()->remove(logger);
601 }
602
603 delete logger;
604 }
605
606 uint64_t set_state(Session *session, int state);
607 void update_average_session_age();
608
609 void register_perfcounters();
610
611 void set_version(const version_t v)
612 {
613 version = projected = v;
614 }
615
616 void set_projected(const version_t v)
617 {
618 projected = v;
619 }
620
621 version_t get_projected() const
622 {
623 return projected;
624 }
625
626 version_t get_committed() const
627 {
628 return committed;
629 }
630
631 version_t get_committing() const
632 {
633 return committing;
634 }
635
636 // sessions
637 void decode_legacy(ceph::buffer::list::const_iterator& blp) override;
638 bool empty() const { return session_map.empty(); }
639 const auto& get_sessions() const {
640 return session_map;
641 }
642
643 bool is_any_state(int state) const {
644 auto it = by_state.find(state);
645 if (it == by_state.end() || it->second->empty())
646 return false;
647 return true;
648 }
649
650 bool have_unclosed_sessions() const {
651 return
652 is_any_state(Session::STATE_OPENING) ||
653 is_any_state(Session::STATE_OPEN) ||
654 is_any_state(Session::STATE_CLOSING) ||
655 is_any_state(Session::STATE_STALE) ||
656 is_any_state(Session::STATE_KILLING);
657 }
658 bool have_session(entity_name_t w) const {
659 return session_map.count(w);
660 }
661 Session* get_session(entity_name_t w) {
662 auto session_map_entry = session_map.find(w);
663 return (session_map_entry != session_map.end() ?
664 session_map_entry-> second : nullptr);
665 }
666 const Session* get_session(entity_name_t w) const {
667 ceph::unordered_map<entity_name_t, Session*>::const_iterator p = session_map.find(w);
668 if (p == session_map.end()) {
669 return NULL;
670 } else {
671 return p->second;
672 }
673 }
674
675 void add_session(Session *s);
676 void remove_session(Session *s);
677 void touch_session(Session *session);
678
679 Session *get_oldest_session(int state) {
680 auto by_state_entry = by_state.find(state);
681 if (by_state_entry == by_state.end() || by_state_entry->second->empty())
682 return 0;
683 return by_state_entry->second->front();
684 }
685
686 void dump();
687
688 template<typename F>
689 void get_client_sessions(F&& f) const {
690 for (const auto& p : session_map) {
691 auto& session = p.second;
692 if (session->info.inst.name.is_client())
693 f(session);
694 }
695 }
696 template<typename C>
697 void get_client_session_set(C& c) const {
698 auto f = [&c](auto& s) {
699 c.insert(s);
700 };
701 get_client_sessions(f);
702 }
703
704 // helpers
705 entity_inst_t& get_inst(entity_name_t w) {
706 ceph_assert(session_map.count(w));
707 return session_map[w]->info.inst;
708 }
709 version_t get_push_seq(client_t client) {
710 return get_session(entity_name_t::CLIENT(client.v))->get_push_seq();
711 }
712 bool have_completed_request(metareqid_t rid) {
713 Session *session = get_session(rid.name);
714 return session && session->have_completed_request(rid.tid, NULL);
715 }
716 void trim_completed_requests(entity_name_t c, ceph_tid_t tid) {
717 Session *session = get_session(c);
718 ceph_assert(session);
719 session->trim_completed_requests(tid);
720 }
721
722 void wipe();
723 void wipe_ino_prealloc();
724
725 object_t get_object_name() const;
726
727 void load(MDSContext *onload);
728 void _load_finish(
729 int operation_r,
730 int header_r,
731 int values_r,
732 bool first,
733 ceph::buffer::list &header_bl,
734 std::map<std::string, ceph::buffer::list> &session_vals,
735 bool more_session_vals);
736
737 void load_legacy();
738 void _load_legacy_finish(int r, ceph::buffer::list &bl);
739
740 void save(MDSContext *onsave, version_t needv=0);
741 void _save_finish(version_t v);
742
743 /**
744 * Advance the version, and mark this session
745 * as dirty within the new version.
746 *
747 * Dirty means journalled but needing writeback
748 * to the backing store. Must have called
749 * mark_projected previously for this session.
750 */
751 void mark_dirty(Session *session, bool may_save=true);
752
753 /**
754 * Advance the projected version, and mark this
755 * session as projected within the new version
756 *
757 * Projected means the session is updated in memory
758 * but we're waiting for the journal write of the update
759 * to finish. Must subsequently call mark_dirty
760 * for sessions in the same global order as calls
761 * to mark_projected.
762 */
763 version_t mark_projected(Session *session);
764
765 /**
766 * During replay, advance versions to account
767 * for a session modification, and mark the
768 * session dirty.
769 */
770 void replay_dirty_session(Session *session);
771
772 /**
773 * During replay, if a session no longer present
774 * would have consumed a version, advance `version`
775 * and `projected` to account for that.
776 */
777 void replay_advance_version();
778
779 /**
780 * During replay, open sessions, advance versions and
781 * mark these sessions as dirty.
782 */
783 void replay_open_sessions(version_t event_cmapv,
784 std::map<client_t,entity_inst_t>& client_map,
785 std::map<client_t,client_metadata_t>& client_metadata_map);
786
787 /**
788 * For these session IDs, if a session exists with this ID, and it has
789 * dirty completed_requests, then persist it immediately
790 * (ahead of usual project/dirty versioned writes
791 * of the map).
792 */
793 void save_if_dirty(const std::set<entity_name_t> &tgt_sessions,
794 MDSGatherBuilder *gather_bld);
795
796 void hit_session(Session *session);
797 void handle_conf_change(const std::set <std::string> &changed);
798
799 MDSRank *mds;
800 std::map<int,xlist<Session*>*> by_state;
801 std::map<version_t, MDSContext::vec> commit_waiters;
802
803 // -- loading, saving --
804 inodeno_t ino;
805 MDSContext::vec waiting_for_load;
806
807 protected:
808 void _mark_dirty(Session *session, bool may_save);
809
810 version_t projected = 0, committing = 0, committed = 0;
811 std::set<entity_name_t> dirty_sessions;
812 std::set<entity_name_t> null_sessions;
813 bool loaded_legacy = false;
814
815 private:
816 uint64_t get_session_count_in_state(int state) {
817 return !is_any_state(state) ? 0 : by_state[state]->size();
818 }
819
820 void update_average_birth_time(const Session &s, bool added=true) {
821 uint32_t sessions = session_map.size();
822 time birth_time = s.get_birth_time();
823
824 if (sessions == 1) {
825 avg_birth_time = added ? birth_time : clock::zero();
826 return;
827 }
828
829 if (added) {
830 avg_birth_time = clock::time_point(
831 ((avg_birth_time - clock::zero()) / sessions) * (sessions - 1) +
832 (birth_time - clock::zero()) / sessions);
833 } else {
834 avg_birth_time = clock::time_point(
835 ((avg_birth_time - clock::zero()) / (sessions - 1)) * sessions -
836 (birth_time - clock::zero()) / (sessions - 1));
837 }
838 }
839
840 time avg_birth_time = clock::zero();
841 };
842
843 std::ostream& operator<<(std::ostream &out, const Session &s);
844 #endif