// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */



#ifndef CEPH_MDCACHE_H
#define CEPH_MDCACHE_H

#include <boost/utility/string_view.hpp>

#include "include/types.h"
#include "include/filepath.h"
#include "include/elist.h"

#include "osdc/Filer.h"
#include "CInode.h"
#include "CDentry.h"
#include "CDir.h"
#include "include/Context.h"
#include "events/EMetaBlob.h"
#include "RecoveryQueue.h"
#include "StrayManager.h"
#include "MDSContext.h"
#include "MDSMap.h"
#include "Mutation.h"

#include "messages/MClientRequest.h"
#include "messages/MMDSSlaveRequest.h"

class PerfCounters;

class MDSRank;
class Session;
class Migrator;

class Message;

class MMDSResolve;
class MMDSResolveAck;
class MMDSCacheRejoin;
class MDiscover;
class MDiscoverReply;
class MCacheExpire;
class MDirUpdate;
class MDentryLink;
class MDentryUnlink;
class MLock;
struct MMDSFindIno;
struct MMDSFindInoReply;
struct MMDSOpenIno;
struct MMDSOpenInoReply;

class MClientRequest;
class MMDSSlaveRequest;
struct MClientSnap;

class MMDSFragmentNotify;

class ESubtreeMap;

enum {
  l_mdc_first = 3000,
  // How many inodes currently in stray dentries
  l_mdc_num_strays,
  // How many stray dentries are currently delayed for purge due to refs
  l_mdc_num_strays_delayed,
  // How many stray dentries are currently being enqueued for purge
  l_mdc_num_strays_enqueuing,

  // How many dentries have ever been added to stray dir
  l_mdc_strays_created,
  // How many dentries have been passed on to PurgeQueue
  l_mdc_strays_enqueued,
  // How many strays have been reintegrated?
  l_mdc_strays_reintegrated,
  // How many strays have been migrated?
  l_mdc_strays_migrated,

  // How many inode sizes currently being recovered
  l_mdc_num_recovering_processing,
  // How many inodes currently waiting to have size recovered
  l_mdc_num_recovering_enqueued,
  // How many inodes waiting with elevated priority for recovery
  l_mdc_num_recovering_prioritized,
  // How many inodes ever started size recovery
  l_mdc_recovery_started,
  // How many inodes ever completed size recovery
  l_mdc_recovery_completed,

  l_mdss_ireq_enqueue_scrub,
  l_mdss_ireq_exportdir,
  l_mdss_ireq_flush,
  l_mdss_ireq_fragmentdir,
  l_mdss_ireq_fragstats,
  l_mdss_ireq_inodestats,

  l_mdc_last,
};


// flags for predirty_journal_parents()
static const int PREDIRTY_PRIMARY = 1; // primary dn, adjust nested accounting
static const int PREDIRTY_DIR = 2;     // update parent dir mtime/size
static const int PREDIRTY_SHALLOW = 4; // only go to immediate parent (for easier rollback)
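// Illustrative sketch (not part of the original header): a typical caller
// combines these flags when journaling a namespace change via
// MDCache::predirty_journal_parents(), declared below. Assumes a valid
// MutationRef 'mut', EMetaBlob 'blob', and CInode 'in':
//
//   mdcache->predirty_journal_parents(mut, blob, in, in->get_parent_dir(),
//                                     PREDIRTY_PRIMARY | PREDIRTY_DIR, 1);
//
// PREDIRTY_PRIMARY propagates nested rstat accounting through the primary
// dentry, while PREDIRTY_DIR also dirties the parent fragstat (mtime/size).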

class MDCache {
public:
  // my master
  MDSRank *mds;

  // -- my cache --
  LRU lru;         // dentry lru for expiring items from cache
  LRU bottom_lru;  // dentries that should be trimmed ASAP
protected:
  ceph::unordered_map<inodeno_t,CInode*> inode_map;  // map of head inodes by ino
  map<vinodeno_t, CInode*> snap_inode_map;           // map of snap inodes by ino
  CInode *root;  // root inode
  CInode *myin;  // .ceph/mds%d dir

  bool readonly;
  void set_readonly() { readonly = true; }

  CInode *strays[NUM_STRAY];  // my stray dir
  int stray_index;

  CInode *get_stray() {
    return strays[stray_index];
  }

  set<CInode*> base_inodes;

  std::unique_ptr<PerfCounters> logger;

  Filer filer;

  bool exceeded_size_limit;

public:
  static uint64_t cache_limit_inodes(void) {
    return g_conf->get_val<int64_t>("mds_cache_size");
  }
  static uint64_t cache_limit_memory(void) {
    return g_conf->get_val<uint64_t>("mds_cache_memory_limit");
  }
  static double cache_reservation(void) {
    return g_conf->get_val<double>("mds_cache_reservation");
  }
  static double cache_mid(void) {
    return g_conf->get_val<double>("mds_cache_mid");
  }
  static double cache_health_threshold(void) {
    return g_conf->get_val<double>("mds_health_cache_threshold");
  }
  double cache_toofull_ratio(void) const {
    uint64_t inode_limit = cache_limit_inodes();
    double inode_reserve = inode_limit*(1.0-cache_reservation());
    double memory_reserve = cache_limit_memory()*(1.0-cache_reservation());
    return fmax(0.0, fmax((cache_size()-memory_reserve)/memory_reserve, inode_limit == 0 ? 0.0 : (CInode::count()-inode_reserve)/inode_reserve));
  }
  bool cache_toofull(void) const {
    return cache_toofull_ratio() > 0.0;
  }
  uint64_t cache_size(void) const {
    return mempool::get_pool(mempool::mds_co::id).allocated_bytes();
  }
  bool cache_overfull(void) const {
    uint64_t inode_limit = cache_limit_inodes();
    return (inode_limit > 0 && CInode::count() > inode_limit*cache_health_threshold()) || (cache_size() > cache_limit_memory()*cache_health_threshold());
  }
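
  // Worked example (illustrative numbers, not from the source): with
  // mds_cache_memory_limit = 1 GiB and mds_cache_reservation = 0.05, the
  // memory reserve is 1 GiB * 0.95 = 0.95 GiB. If cache_size() reports
  // 1.045 GiB, cache_toofull_ratio() ~= (1.045 - 0.95) / 0.95 ~= 0.10, so
  // cache_toofull() returns true. cache_overfull() instead compares usage
  // against the limit scaled by mds_health_cache_threshold (e.g. 1.5x).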

  void advance_stray() {
    stray_index = (stray_index+1)%NUM_STRAY;
  }

  void activate_stray_manager();

  /**
   * Call this when you know that a CDentry is ready to be passed
   * on to StrayManager (i.e. this is a stray you've just created)
   */
  void notify_stray(CDentry *dn) {
    assert(dn->get_dir()->get_inode()->is_stray());
    stray_manager.eval_stray(dn);
  }

  void maybe_eval_stray(CInode *in, bool delay=false);
  void clear_dirty_bits_for_stray(CInode* diri);

  bool is_readonly() { return readonly; }
  void force_readonly();

  DecayRate decayrate;

  int num_shadow_inodes;

  int num_inodes_with_caps;

  unsigned max_dir_commit_size;

  static file_layout_t gen_default_file_layout(const MDSMap &mdsmap);
  static file_layout_t gen_default_log_layout(const MDSMap &mdsmap);

  file_layout_t default_file_layout;
  file_layout_t default_log_layout;

  void register_perfcounters();

  // -- client leases --
public:
  static const int client_lease_pools = 3;
  float client_lease_durations[client_lease_pools];
protected:
  xlist<ClientLease*> client_leases[client_lease_pools];
public:
  void touch_client_lease(ClientLease *r, int pool, utime_t ttl) {
    client_leases[pool].push_back(&r->item_lease);
    r->ttl = ttl;
  }

  void notify_stray_removed()
  {
    stray_manager.notify_stray_removed();
  }

  void notify_stray_created()
  {
    stray_manager.notify_stray_created();
  }

  void eval_remote(CDentry *dn)
  {
    stray_manager.eval_remote(dn);
  }

  // -- client caps --
  uint64_t last_cap_id;



  // -- discover --
  struct discover_info_t {
    ceph_tid_t tid;
    mds_rank_t mds;
    inodeno_t ino;
    frag_t frag;
    snapid_t snap;
    filepath want_path;
    CInode *basei;
    bool want_base_dir;
    bool want_xlocked;

    discover_info_t() :
      tid(0), mds(-1), snap(CEPH_NOSNAP), basei(NULL),
      want_base_dir(false), want_xlocked(false) {}
    ~discover_info_t() {
      if (basei)
        basei->put(MDSCacheObject::PIN_DISCOVERBASE);
    }
    void pin_base(CInode *b) {
      basei = b;
      basei->get(MDSCacheObject::PIN_DISCOVERBASE);
    }
  };

  map<ceph_tid_t, discover_info_t> discovers;
  ceph_tid_t discover_last_tid;

  void _send_discover(discover_info_t& dis);
  discover_info_t& _create_discover(mds_rank_t mds) {
    ceph_tid_t t = ++discover_last_tid;
    discover_info_t& d = discovers[t];
    d.tid = t;
    d.mds = mds;
    return d;
  }
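
  // Illustrative sketch (hypothetical; loosely mirroring how the
  // discover_* helpers below build a request). Issuing a discover to a
  // peer follows a create/fill/send pattern, with the base inode pinned
  // for the lifetime of the request:
  //
  //   discover_info_t& d = _create_discover(target_mds);  // allocates tid
  //   d.ino = base->ino();
  //   d.want_path = want_path;
  //   d.pin_base(base);   // hold PIN_DISCOVERBASE until the reply arrives
  //   _send_discover(d);  // builds and sends the MDiscover message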

  // waiters
  map<int, map<inodeno_t, list<MDSInternalContextBase*> > > waiting_for_base_ino;

  void discover_base_ino(inodeno_t want_ino, MDSInternalContextBase *onfinish, mds_rank_t from=MDS_RANK_NONE);
  void discover_dir_frag(CInode *base, frag_t approx_fg, MDSInternalContextBase *onfinish,
                         mds_rank_t from=MDS_RANK_NONE);
  void discover_path(CInode *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
                     bool want_xlocked=false, mds_rank_t from=MDS_RANK_NONE);
  void discover_path(CDir *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
                     bool want_xlocked=false);
  void kick_discovers(mds_rank_t who);  // after a failure.


  // -- subtrees --
protected:
  /* subtree keys and each tree's non-recursive nested subtrees (the "bounds") */
  map<CDir*,set<CDir*> > subtrees;
  map<CInode*,list<pair<CDir*,CDir*> > > projected_subtree_renames;  // renamed ino -> target dir

  // adjust subtree auth specification
  //  dir->dir_auth
  //  imports/exports/nested_exports
  //  join/split subtrees as appropriate
public:
  bool is_subtrees() { return !subtrees.empty(); }
  void list_subtrees(list<CDir*>& ls);
  void adjust_subtree_auth(CDir *root, mds_authority_t auth, bool adjust_pop=true);
  void adjust_subtree_auth(CDir *root, mds_rank_t a, mds_rank_t b=CDIR_AUTH_UNKNOWN) {
    adjust_subtree_auth(root, mds_authority_t(a,b));
  }
  void adjust_bounded_subtree_auth(CDir *dir, set<CDir*>& bounds, mds_authority_t auth);
  void adjust_bounded_subtree_auth(CDir *dir, set<CDir*>& bounds, mds_rank_t a) {
    adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
  }
  void adjust_bounded_subtree_auth(CDir *dir, vector<dirfrag_t>& bounds, mds_authority_t auth);
  void adjust_bounded_subtree_auth(CDir *dir, vector<dirfrag_t>& bounds, mds_rank_t a) {
    adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
  }
  void map_dirfrag_set(list<dirfrag_t>& dfs, set<CDir*>& result);
  void try_subtree_merge(CDir *root);
  void try_subtree_merge_at(CDir *root, set<CInode*> *to_eval, bool adjust_pop=true);
  void subtree_merge_writebehind_finish(CInode *in, MutationRef& mut);
  void eval_subtree_root(CInode *diri);
  CDir *get_subtree_root(CDir *dir);
  CDir *get_projected_subtree_root(CDir *dir);
  bool is_leaf_subtree(CDir *dir) {
    assert(subtrees.count(dir));
    return subtrees[dir].empty();
  }
  void remove_subtree(CDir *dir);
  bool is_subtree(CDir *root) {
    return subtrees.count(root);
  }
  void get_subtree_bounds(CDir *root, set<CDir*>& bounds);
  void get_wouldbe_subtree_bounds(CDir *root, set<CDir*>& bounds);
  void verify_subtree_bounds(CDir *root, const set<CDir*>& bounds);
  void verify_subtree_bounds(CDir *root, const list<dirfrag_t>& bounds);

  void project_subtree_rename(CInode *diri, CDir *olddir, CDir *newdir);
  void adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop);

  void get_auth_subtrees(set<CDir*>& s);
  void get_fullauth_subtrees(set<CDir*>& s);

  int num_subtrees();
  int num_subtrees_fullauth();
  int num_subtrees_fullnonauth();


protected:
  // delayed cache expire
  map<CDir*, map<mds_rank_t, MCacheExpire*> > delayed_expire;  // subtree root -> expire msg


  // -- requests --
  ceph::unordered_map<metareqid_t, MDRequestRef> active_requests;

public:
  int get_num_client_requests();

  MDRequestRef request_start(MClientRequest *req);
  MDRequestRef request_start_slave(metareqid_t rid, __u32 attempt, Message *m);
  MDRequestRef request_start_internal(int op);
  bool have_request(metareqid_t rid) {
    return active_requests.count(rid);
  }
  MDRequestRef request_get(metareqid_t rid);
  void request_pin_ref(MDRequestRef& r, CInode *ref, vector<CDentry*>& trace);
  void request_finish(MDRequestRef& mdr);
  void request_forward(MDRequestRef& mdr, mds_rank_t mds, int port=0);
  void dispatch_request(MDRequestRef& mdr);
  void request_drop_foreign_locks(MDRequestRef& mdr);
  void request_drop_non_rdlocks(MDRequestRef& r);
  void request_drop_locks(MDRequestRef& r);
  void request_cleanup(MDRequestRef& r);

  void request_kill(MDRequestRef& r);  // called when session closes

  // journal/snap helpers
  CInode *pick_inode_snap(CInode *in, snapid_t follows);
  CInode *cow_inode(CInode *in, snapid_t last);
  void journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, CDentry *dn,
                          snapid_t follows=CEPH_NOSNAP,
                          CInode **pcow_inode=0, CDentry::linkage_t *dnl=0);
  void journal_cow_inode(MutationRef& mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP,
                         CInode **pcow_inode=0);
  void journal_dirty_inode(MutationImpl *mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP);

  void project_rstat_inode_to_frag(CInode *cur, CDir *parent, snapid_t first,
                                   int linkunlink, SnapRealm *prealm);
  void _project_rstat_inode_to_frag(CInode::mempool_inode& inode, snapid_t ofirst, snapid_t last,
                                    CDir *parent, int linkunlink, bool update_inode);
  void project_rstat_frag_to_inode(nest_info_t& rstat, nest_info_t& accounted_rstat,
                                   snapid_t ofirst, snapid_t last,
                                   CInode *pin, bool cow_head);
  void broadcast_quota_to_client(CInode *in, client_t exclude_ct = -1);
  void predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
                                CInode *in, CDir *parent,
                                int flags, int linkunlink=0,
                                snapid_t follows=CEPH_NOSNAP);

  // slaves
  void add_uncommitted_master(metareqid_t reqid, LogSegment *ls, set<mds_rank_t> &slaves, bool safe=false) {
    uncommitted_masters[reqid].ls = ls;
    uncommitted_masters[reqid].slaves = slaves;
    uncommitted_masters[reqid].safe = safe;
  }
  void wait_for_uncommitted_master(metareqid_t reqid, MDSInternalContextBase *c) {
    uncommitted_masters[reqid].waiters.push_back(c);
  }
  bool have_uncommitted_master(metareqid_t reqid, mds_rank_t from) {
    auto p = uncommitted_masters.find(reqid);
    return p != uncommitted_masters.end() && p->second.slaves.count(from) > 0;
  }
  void log_master_commit(metareqid_t reqid);
  void logged_master_update(metareqid_t reqid);
  void _logged_master_commit(metareqid_t reqid);
  void committed_master_slave(metareqid_t r, mds_rank_t from);
  void finish_committed_masters();

  void _logged_slave_commit(mds_rank_t from, metareqid_t reqid);

  // -- recovery --
protected:
  set<mds_rank_t> recovery_set;

public:
  void set_recovery_set(set<mds_rank_t>& s);
  void handle_mds_failure(mds_rank_t who);
  void handle_mds_recovery(mds_rank_t who);

protected:
  // [resolve]
  // from EImportStart w/o EImportFinish during journal replay
  map<dirfrag_t, vector<dirfrag_t> > my_ambiguous_imports;
  // from MMDSResolves
  map<mds_rank_t, map<dirfrag_t, vector<dirfrag_t> > > other_ambiguous_imports;

  map<mds_rank_t, map<metareqid_t, MDSlaveUpdate*> > uncommitted_slave_updates;  // slave: for replay.
  map<CInode*, int> uncommitted_slave_rename_olddir;  // slave: preserve the non-auth dir until seeing commit.
  map<CInode*, int> uncommitted_slave_unlink;         // slave: preserve the unlinked inode until seeing commit.

  // track master requests whose slaves haven't acknowledged commit
  struct umaster {
    set<mds_rank_t> slaves;
    LogSegment *ls;
    list<MDSInternalContextBase*> waiters;
    bool safe;
    bool committing;
    bool recovering;
    umaster() : ls(NULL), safe(false), committing(false), recovering(false) {}
  };
  map<metareqid_t, umaster> uncommitted_masters;  // master: req -> slave set

  set<metareqid_t> pending_masters;
  map<int, set<metareqid_t> > ambiguous_slave_updates;

  friend class ESlaveUpdate;
  friend class ECommitted;

  bool resolves_pending;
  set<mds_rank_t> resolve_gather;       // nodes i need resolves from
  set<mds_rank_t> resolve_ack_gather;   // nodes i need a resolve_ack from
  map<metareqid_t, mds_rank_t> need_resolve_rollback;  // rollbacks i'm writing to the journal
  map<mds_rank_t, MMDSResolve*> delayed_resolve;

  void handle_resolve(MMDSResolve *m);
  void handle_resolve_ack(MMDSResolveAck *m);
  void process_delayed_resolve();
  void discard_delayed_resolve(mds_rank_t who);
  void maybe_resolve_finish();
  void disambiguate_my_imports();
  void disambiguate_other_imports();
  void trim_unlinked_inodes();
  void add_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master, MDSlaveUpdate*);
  void finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
  MDSlaveUpdate* get_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
public:
  void recalc_auth_bits(bool replay);
  void remove_inode_recursive(CInode *in);

  bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    auto p = ambiguous_slave_updates.find(master);
    return p != ambiguous_slave_updates.end() && p->second.count(reqid);
  }
  void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    ambiguous_slave_updates[master].insert(reqid);
  }
  void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    auto p = ambiguous_slave_updates.find(master);
    auto q = p->second.find(reqid);
    assert(q != p->second.end());
    p->second.erase(q);
    if (p->second.empty())
      ambiguous_slave_updates.erase(p);
  }

  void add_rollback(metareqid_t reqid, mds_rank_t master) {
    need_resolve_rollback[reqid] = master;
  }
  void finish_rollback(metareqid_t reqid);

  // ambiguous imports
  void add_ambiguous_import(dirfrag_t base, const vector<dirfrag_t>& bounds);
  void add_ambiguous_import(CDir *base, const set<CDir*>& bounds);
  bool have_ambiguous_import(dirfrag_t base) {
    return my_ambiguous_imports.count(base);
  }
  void get_ambiguous_import_bounds(dirfrag_t base, vector<dirfrag_t>& bounds) {
    assert(my_ambiguous_imports.count(base));
    bounds = my_ambiguous_imports[base];
  }
  void cancel_ambiguous_import(CDir *);
  void finish_ambiguous_import(dirfrag_t dirino);
  void resolve_start(MDSInternalContext *resolve_done_);
  void send_resolves();
  void send_slave_resolves();
  void send_subtree_resolves();
  void maybe_send_pending_resolves() {
    if (resolves_pending)
      send_subtree_resolves();
  }

  void _move_subtree_map_bound(dirfrag_t df, dirfrag_t oldparent, dirfrag_t newparent,
                               map<dirfrag_t,vector<dirfrag_t> >& subtrees);
  ESubtreeMap *create_subtree_map();


  void clean_open_file_lists();

protected:
  // [rejoin]
  bool rejoins_pending;
  set<mds_rank_t> rejoin_gather;      // nodes from whom i need a rejoin
  set<mds_rank_t> rejoin_sent;        // nodes i sent a rejoin to
  set<mds_rank_t> rejoin_ack_sent;    // nodes i sent a rejoin ack to
  set<mds_rank_t> rejoin_ack_gather;  // nodes from whom i need a rejoin ack
  map<mds_rank_t,map<inodeno_t,map<client_t,Capability::Import> > > rejoin_imported_caps;
  map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > > rejoin_slave_exports;
  map<client_t,entity_inst_t> rejoin_client_map;
  map<client_t,pair<Session*,uint64_t> > rejoin_session_map;

  map<inodeno_t,pair<mds_rank_t,map<client_t,cap_reconnect_t> > > cap_exports;  // ino -> target, client -> capex

  map<inodeno_t,map<client_t,map<mds_rank_t,cap_reconnect_t> > > cap_imports;   // ino -> client -> frommds -> capex
  set<inodeno_t> cap_imports_missing;
  map<inodeno_t, list<MDSInternalContextBase*> > cap_reconnect_waiters;
  int cap_imports_num_opening;

  set<CInode*> rejoin_undef_inodes;
  set<CInode*> rejoin_potential_updated_scatterlocks;
  set<CDir*> rejoin_undef_dirfrags;
  map<mds_rank_t, set<CInode*> > rejoin_unlinked_inodes;

  vector<CInode*> rejoin_recover_q, rejoin_check_q;
  list<SimpleLock*> rejoin_eval_locks;
  list<MDSInternalContextBase*> rejoin_waiters;

  void rejoin_walk(CDir *dir, MMDSCacheRejoin *rejoin);
  void handle_cache_rejoin(MMDSCacheRejoin *m);
  void handle_cache_rejoin_weak(MMDSCacheRejoin *m);
  CInode* rejoin_invent_inode(inodeno_t ino, snapid_t last);
  CDir* rejoin_invent_dirfrag(dirfrag_t df);
  void handle_cache_rejoin_strong(MMDSCacheRejoin *m);
  void rejoin_scour_survivor_replicas(mds_rank_t from, MMDSCacheRejoin *ack,
                                      set<vinodeno_t>& acked_inodes,
                                      set<SimpleLock *>& gather_locks);
  void handle_cache_rejoin_ack(MMDSCacheRejoin *m);
  void rejoin_send_acks();
  void rejoin_trim_undef_inodes();
  void maybe_send_pending_rejoins() {
    if (rejoins_pending)
      rejoin_send_rejoins();
  }
  std::unique_ptr<MDSInternalContext> rejoin_done;
  std::unique_ptr<MDSInternalContext> resolve_done;
public:
  void rejoin_start(MDSInternalContext *rejoin_done_);
  void rejoin_gather_finish();
  void rejoin_send_rejoins();
  void rejoin_export_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
                          int target=-1) {
    auto& ex = cap_exports[ino];
    ex.first = target;
    ex.second[client] = icr;
  }
  void rejoin_recovered_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
                             mds_rank_t frommds=MDS_RANK_NONE) {
    cap_imports[ino][client][frommds] = icr;
  }
  void rejoin_recovered_client(client_t client, const entity_inst_t& inst) {
    rejoin_client_map.emplace(client, inst);
  }
  const cap_reconnect_t *get_replay_cap_reconnect(inodeno_t ino, client_t client) {
    if (cap_imports.count(ino) &&
        cap_imports[ino].count(client) &&
        cap_imports[ino][client].count(MDS_RANK_NONE)) {
      return &cap_imports[ino][client][MDS_RANK_NONE];
    }
    return NULL;
  }
  void remove_replay_cap_reconnect(inodeno_t ino, client_t client) {
    assert(cap_imports[ino].size() == 1);
    assert(cap_imports[ino][client].size() == 1);
    cap_imports.erase(ino);
  }
  void wait_replay_cap_reconnect(inodeno_t ino, MDSInternalContextBase *c) {
    cap_reconnect_waiters[ino].push_back(c);
  }

  // [reconnect/rejoin caps]
  struct reconnected_cap_info_t {
    inodeno_t realm_ino;
    snapid_t snap_follows;
    int dirty_caps;
    reconnected_cap_info_t() :
      realm_ino(0), snap_follows(0), dirty_caps(0) {}
  };
  map<inodeno_t,map<client_t, reconnected_cap_info_t> > reconnected_caps;  // inode -> client -> snap_follows,realmino
  map<inodeno_t,map<client_t, snapid_t> > reconnected_snaprealms;          // realmino -> client -> realmseq

  void add_reconnected_cap(client_t client, inodeno_t ino, const cap_reconnect_t& icr) {
    reconnected_cap_info_t &info = reconnected_caps[ino][client];
    info.realm_ino = inodeno_t(icr.capinfo.snaprealm);
    info.snap_follows = icr.snap_follows;
  }
  void set_reconnected_dirty_caps(client_t client, inodeno_t ino, int dirty) {
    reconnected_cap_info_t &info = reconnected_caps[ino][client];
    info.dirty_caps |= dirty;
  }
  void add_reconnected_snaprealm(client_t client, inodeno_t ino, snapid_t seq) {
    reconnected_snaprealms[ino][client] = seq;
  }

  friend class C_MDC_RejoinOpenInoFinish;
  friend class C_MDC_RejoinSessionsOpened;
  void rejoin_open_ino_finish(inodeno_t ino, int ret);
  void rejoin_open_sessions_finish(map<client_t,pair<Session*,uint64_t> >& session_map);
  bool process_imported_caps();
  void choose_lock_states_and_reconnect_caps();
  void prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t ino,
                           map<client_t,MClientSnap*>& splits);
  void do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool nosend=false);
  void send_snaps(map<client_t,MClientSnap*>& splits);
  Capability* rejoin_import_cap(CInode *in, client_t client, const cap_reconnect_t& icr, mds_rank_t frommds);
  void finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snapid_t seq);
  void try_reconnect_cap(CInode *in, Session *session);
  void export_remaining_imported_caps();

  // cap imports. delayed snap parent opens.
  //  realm inode -> client -> cap inodes needing to split to this realm
  map<CInode*,set<CInode*> > missing_snap_parents;
  map<client_t,set<CInode*> > delayed_imported_caps;

  void do_cap_import(Session *session, CInode *in, Capability *cap,
                     uint64_t p_cap_id, ceph_seq_t p_seq, ceph_seq_t p_mseq,
                     int peer, int p_flags);
  void do_delayed_cap_imports();
  void rebuild_need_snapflush(CInode *head_in, SnapRealm *realm, client_t client,
                              snapid_t snap_follows);
  void check_realm_past_parents(SnapRealm *realm, bool reconnect);
  void open_snap_parents();

  bool open_undef_inodes_dirfrags();
  void opened_undef_inode(CInode *in);
  void opened_undef_dirfrag(CDir *dir) {
    rejoin_undef_dirfrags.erase(dir);
  }

  void reissue_all_caps();


  friend class Locker;
  friend class Migrator;
  friend class MDBalancer;

  // StrayManager needs to be able to remove_inode() from us
  // when it is done purging
  friend class StrayManager;

  // File size recovery
private:
  RecoveryQueue recovery_queue;
  void identify_files_to_recover();
public:
  void start_files_to_recover();
  void do_file_recover();
  void queue_file_recover(CInode *in);
  void _queued_file_recover_cow(CInode *in, MutationRef& mut);

  // subsystems
  std::unique_ptr<Migrator> migrator;

public:
  explicit MDCache(MDSRank *m, PurgeQueue &purge_queue_);
  ~MDCache();

  // debug
  void log_stat();

  // root inode
  CInode *get_root() { return root; }
  CInode *get_myin() { return myin; }

  size_t get_cache_size() { return lru.lru_get_size(); }

  // trimming
  bool trim(uint64_t count=0);
private:
  void trim_lru(uint64_t count, map<mds_rank_t, MCacheExpire*>& expiremap);
  bool trim_dentry(CDentry *dn, map<mds_rank_t, MCacheExpire*>& expiremap);
  void trim_dirfrag(CDir *dir, CDir *con,
                    map<mds_rank_t, MCacheExpire*>& expiremap);
  bool trim_inode(CDentry *dn, CInode *in, CDir *con,
                  map<mds_rank_t,class MCacheExpire*>& expiremap);
  void send_expire_messages(map<mds_rank_t, MCacheExpire*>& expiremap);
  void trim_non_auth();  // trim out trimmable non-auth items
public:
  bool trim_non_auth_subtree(CDir *directory);
  void standby_trim_segment(LogSegment *ls);
  void try_trim_non_auth_subtree(CDir *dir);
  bool can_trim_non_auth_dirfrag(CDir *dir) {
    return my_ambiguous_imports.count(dir->dirfrag()) == 0 &&
           uncommitted_slave_rename_olddir.count(dir->inode) == 0;
  }

  /**
   * For all unreferenced inodes, dirs, dentries below an inode, compose
   * expiry messages. This is used when giving up all replicas of entities
   * for an MDS peer in the 'stopping' state, such that the peer can
   * empty its cache and finish shutting down.
   *
   * We have to make sure we're only expiring un-referenced items to
   * avoid interfering with ongoing stray-movement (we can't distinguish
   * between the "moving my strays" and "waiting for my cache to empty"
   * phases within 'stopping')
   *
   * @return false if we completed cleanly, true if caller should stop
   * expiring because we hit something with refs.
   */
  bool expire_recursive(
    CInode *in,
    std::map<mds_rank_t, MCacheExpire*>& expiremap);
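
  // Illustrative caller sketch (hypothetical, not from the original
  // source): while a peer is 'stopping', walk down from a base inode and
  // bail out as soon as something still holds refs:
  //
  //   map<mds_rank_t, MCacheExpire*> expiremap;
  //   if (!expire_recursive(base_in, expiremap))
  //     send_expire_messages(expiremap);  // completed cleanly
  //   // else: hit a referenced item; retry on a later tick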

  void trim_client_leases();
  void check_memory_usage();

  utime_t last_recall_state;

  // shutdown
private:
  set<inodeno_t> shutdown_exported_strays;
public:
  void shutdown_start();
  void shutdown_check();
  bool shutdown_pass();
  bool shutdown_export_strays();
  bool shutdown();  // clear cache (i.e. at shutdown)

  bool did_shutdown_log_cap;

  // inode_map
  bool have_inode(vinodeno_t vino) {
    if (vino.snapid == CEPH_NOSNAP)
      return inode_map.count(vino.ino) ? true : false;
    else
      return snap_inode_map.count(vino) ? true : false;
  }
  bool have_inode(inodeno_t ino, snapid_t snap=CEPH_NOSNAP) {
    return have_inode(vinodeno_t(ino, snap));
  }
  CInode* get_inode(vinodeno_t vino) {
    if (vino.snapid == CEPH_NOSNAP) {
      auto p = inode_map.find(vino.ino);
      if (p != inode_map.end())
        return p->second;
    } else {
      auto p = snap_inode_map.find(vino);
      if (p != snap_inode_map.end())
        return p->second;
    }
    return NULL;
  }
  CInode* get_inode(inodeno_t ino, snapid_t s=CEPH_NOSNAP) {
    return get_inode(vinodeno_t(ino, s));
  }
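
  // Usage sketch (illustrative): head inodes are keyed by plain ino in
  // inode_map, snapshotted inodes by (ino, snapid) in snap_inode_map, so
  // a CEPH_NOSNAP lookup hits the former and any other snapid the latter:
  //
  //   CInode *head = get_inode(ino);               // head (NOSNAP) version
  //   CInode *snap = get_inode(ino, some_snapid);  // a specific snapshot
  //   // a NULL result means "not in cache"; the caller may then need
  //   // open_ino() or a discover to pull it in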

  CDir* get_dirfrag(dirfrag_t df) {
    CInode *in = get_inode(df.ino);
    if (!in)
      return NULL;
    return in->get_dirfrag(df.frag);
  }
  CDir* get_dirfrag(inodeno_t ino, boost::string_view dn) {
    CInode *in = get_inode(ino);
    if (!in)
      return NULL;
    frag_t fg = in->pick_dirfrag(dn);
    return in->get_dirfrag(fg);
  }
  CDir* get_force_dirfrag(dirfrag_t df, bool replay) {
    CInode *diri = get_inode(df.ino);
    if (!diri)
      return NULL;
    CDir *dir = force_dir_fragment(diri, df.frag, replay);
    if (!dir)
      dir = diri->get_dirfrag(df.frag);
    return dir;
  }

  MDSCacheObject *get_object(MDSCacheObjectInfo &info);



public:
  void add_inode(CInode *in);

  void remove_inode(CInode *in);
protected:
  void touch_inode(CInode *in) {
    if (in->get_parent_dn())
      touch_dentry(in->get_projected_parent_dn());
  }
public:
  void touch_dentry(CDentry *dn) {
    if (dn->state_test(CDentry::STATE_BOTTOMLRU)) {
      bottom_lru.lru_midtouch(dn);
    } else {
      if (dn->is_auth())
        lru.lru_touch(dn);
      else
        lru.lru_midtouch(dn);
    }
  }
  void touch_dentry_bottom(CDentry *dn) {
    if (dn->state_test(CDentry::STATE_BOTTOMLRU))
      return;
    lru.lru_bottouch(dn);
  }
protected:

  void inode_remove_replica(CInode *in, mds_rank_t rep, bool rejoin,
                            set<SimpleLock *>& gather_locks);
  void dentry_remove_replica(CDentry *dn, mds_rank_t rep, set<SimpleLock *>& gather_locks);

  void rename_file(CDentry *srcdn, CDentry *destdn);

public:
  // truncate
  void truncate_inode(CInode *in, LogSegment *ls);
  void _truncate_inode(CInode *in, LogSegment *ls);
  void truncate_inode_finish(CInode *in, LogSegment *ls);
  void truncate_inode_logged(CInode *in, MutationRef& mut);

  void add_recovered_truncate(CInode *in, LogSegment *ls);
  void remove_recovered_truncate(CInode *in, LogSegment *ls);
  void start_recovered_truncates();


public:
  CDir *get_auth_container(CDir *in);
  CDir *get_export_container(CDir *dir);
  void find_nested_exports(CDir *dir, set<CDir*>& s);
  void find_nested_exports_under(CDir *import, CDir *dir, set<CDir*>& s);


private:
  bool opening_root, open;
  list<MDSInternalContextBase*> waiting_for_open;

public:
  void init_layouts();
  void create_unlinked_system_inode(CInode *in, inodeno_t ino,
                                    int mode) const;
  CInode *create_system_inode(inodeno_t ino, int mode);
  CInode *create_root_inode();

  void create_empty_hierarchy(MDSGather *gather);
  void create_mydir_hierarchy(MDSGather *gather);

  bool is_open() { return open; }
  void wait_for_open(MDSInternalContextBase *c) {
    waiting_for_open.push_back(c);
  }

  void open_root_inode(MDSInternalContextBase *c);
  void open_root();
  void open_mydir_inode(MDSInternalContextBase *c);
  void open_mydir_frag(MDSInternalContextBase *c);
  void populate_mydir();

  void _create_system_file(CDir *dir, const char *name, CInode *in, MDSInternalContextBase *fin);
  void _create_system_file_finish(MutationRef& mut, CDentry *dn,
                                  version_t dpv, MDSInternalContextBase *fin);

  void open_foreign_mdsdir(inodeno_t ino, MDSInternalContextBase *c);
  CDir *get_stray_dir(CInode *in);
  CDentry *get_or_create_stray_dentry(CInode *in);

  MDSInternalContextBase *_get_waiter(MDRequestRef& mdr, Message *req, MDSInternalContextBase *fin);

  /**
   * Find the given dentry (and whether it exists or not), its ancestors,
   * and get them all into memory and usable on this MDS. This function
   * makes a best-effort attempt to load everything; if it needs to
   * go away and do something then it will put the request on a waitlist.
   * It prefers the mdr, then the req, then the fin. (At least one of these
   * must be non-null.)
   *
   * @param mdr The MDRequest associated with the path. Can be null.
   * @param req The Message associated with the path. Can be null.
   * @param fin The Context associated with the path. Can be null.
   * @param path The path to traverse to.
   * @param pdnvec Data return parameter -- on success, contains a
   * vector of dentries. On failure, is either empty or contains the
   * full trace of traversable dentries.
   * @param pin Data return parameter -- if successful, points to the inode
   * associated with filepath. If unsuccessful, is null.
   * @param onfail Specifies different lookup failure behaviors. If set to
   * MDS_TRAVERSE_DISCOVERXLOCK, path_traverse will succeed on null
   * dentries (instead of returning -ENOENT). If set to
   * MDS_TRAVERSE_FORWARD, it will forward the request to the auth
   * MDS if that becomes appropriate (ie, if it doesn't know the contents
   * of a directory). If set to MDS_TRAVERSE_DISCOVER, it
   * will attempt to look up the path from a different MDS (and bring them
   * into its cache as replicas).
   *
   * @returns 0 on success, 1 on "not done yet", 2 on "forwarding", -errno otherwise.
   * If it returns 1, the requester associated with this call has been placed
   * on the appropriate waitlist, and it should unwind itself and back out.
   * If it returns 2 the request has been forwarded, and again the requester
   * should unwind itself and back out.
   */
  int path_traverse(MDRequestRef& mdr, Message *req, MDSInternalContextBase *fin, const filepath& path,
                    vector<CDentry*> *pdnvec, CInode **pin, int onfail);
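
  /**
   * Illustrative caller sketch (hypothetical, not from the original
   * source), showing how the return codes documented above are
   * typically handled:
   *
   *   vector<CDentry*> trace;
   *   CInode *in = NULL;
   *   int r = path_traverse(mdr, NULL, NULL, path, &trace, &in,
   *                         MDS_TRAVERSE_FORWARD);
   *   if (r > 0)
   *     return;   // 1: waiting, 2: forwarded -- unwind and back out
   *   if (r < 0)
   *     return;   // -errno, e.g. -ENOENT; reply with the error
   *   // r == 0: 'in' and 'trace' are valid and usable on this MDS
   */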

  CInode *cache_traverse(const filepath& path);

  void open_remote_dirfrag(CInode *diri, frag_t fg, MDSInternalContextBase *fin);
  CInode *get_dentry_inode(CDentry *dn, MDRequestRef& mdr, bool projected=false);

  bool parallel_fetch(map<inodeno_t,filepath>& pathmap, set<inodeno_t>& missing);
  bool parallel_fetch_traverse_dir(inodeno_t ino, filepath& path,
                                   set<CDir*>& fetch_queue, set<inodeno_t>& missing,
                                   C_GatherBuilder &gather_bld);

  void open_remote_dentry(CDentry *dn, bool projected, MDSInternalContextBase *fin,
                          bool want_xlocked=false);
  void _open_remote_dentry_finish(CDentry *dn, inodeno_t ino, MDSInternalContextBase *fin,
                                  bool want_xlocked, int r);

  void make_trace(vector<CDentry*>& trace, CInode *in);

protected:
  struct open_ino_info_t {
    vector<inode_backpointer_t> ancestors;
    set<mds_rank_t> checked;
    mds_rank_t checking;
    mds_rank_t auth_hint;
    bool check_peers;
    bool fetch_backtrace;
    bool discover;
    bool want_replica;
    bool want_xlocked;
    version_t tid;
    int64_t pool;
    int last_err;
    list<MDSInternalContextBase*> waiters;
    open_ino_info_t() : checking(MDS_RANK_NONE), auth_hint(MDS_RANK_NONE),
                        check_peers(true), fetch_backtrace(true), discover(false),
                        want_replica(false), want_xlocked(false), tid(0), pool(-1),
                        last_err(0) {}
  };
  ceph_tid_t open_ino_last_tid;
  map<inodeno_t,open_ino_info_t> opening_inodes;

  void _open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err);
  void _open_ino_parent_opened(inodeno_t ino, int ret);
  void _open_ino_traverse_dir(inodeno_t ino, open_ino_info_t& info, int err);
  void _open_ino_fetch_dir(inodeno_t ino, MMDSOpenIno *m, CDir *dir, bool parent);
  int open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
                            vector<inode_backpointer_t>& ancestors,
                            bool discover, bool want_xlocked, mds_rank_t *hint);
  void open_ino_finish(inodeno_t ino, open_ino_info_t& info, int err);
  void do_open_ino(inodeno_t ino, open_ino_info_t& info, int err);
  void do_open_ino_peer(inodeno_t ino, open_ino_info_t& info);
  void handle_open_ino(MMDSOpenIno *m, int err=0);
  void handle_open_ino_reply(MMDSOpenInoReply *m);
  friend class C_IO_MDC_OpenInoBacktraceFetched;
  friend struct C_MDC_OpenInoTraverseDir;
  friend struct C_MDC_OpenInoParentOpened;

public:
  void kick_open_ino_peers(mds_rank_t who);
  void open_ino(inodeno_t ino, int64_t pool, MDSInternalContextBase *fin,
                bool want_replica=true, bool want_xlocked=false);

  // -- find_ino_peer --
  struct find_ino_peer_info_t {
    inodeno_t ino;
    ceph_tid_t tid;
    MDSInternalContextBase *fin;
    mds_rank_t hint;
    mds_rank_t checking;
    set<mds_rank_t> checked;

    find_ino_peer_info_t() : tid(0), fin(NULL), hint(MDS_RANK_NONE), checking(MDS_RANK_NONE) {}
  };

  map<ceph_tid_t, find_ino_peer_info_t> find_ino_peer;
  ceph_tid_t find_ino_peer_last_tid;

  void find_ino_peers(inodeno_t ino, MDSInternalContextBase *c, mds_rank_t hint=MDS_RANK_NONE);
  void _do_find_ino_peer(find_ino_peer_info_t& fip);
  void handle_find_ino(MMDSFindIno *m);
  void handle_find_ino_reply(MMDSFindInoReply *m);
  void kick_find_ino_peers(mds_rank_t who);

  // -- snaprealms --
public:
  void snaprealm_create(MDRequestRef& mdr, CInode *in);
  void _snaprealm_create_finish(MDRequestRef& mdr, MutationRef& mut, CInode *in);

  // -- stray --
public:
  void fetch_backtrace(inodeno_t ino, int64_t pool, bufferlist& bl, Context *fin);
  uint64_t get_num_strays() const { return stray_manager.get_num_strays(); }

protected:
  void scan_stray_dir(dirfrag_t next=dirfrag_t());
  StrayManager stray_manager;
  friend struct C_MDC_RetryScanStray;
  friend class C_IO_MDC_FetchedBacktrace;

  // == messages ==
public:
  void dispatch(Message *m);

protected:
  // -- replicas --
  void handle_discover(MDiscover *dis);
  void handle_discover_reply(MDiscoverReply *m);
  friend class C_MDC_Join;

public:
  void replicate_dir(CDir *dir, mds_rank_t to, bufferlist& bl);
  void replicate_dentry(CDentry *dn, mds_rank_t to, bufferlist& bl);
  void replicate_inode(CInode *in, mds_rank_t to, bufferlist& bl,
                       uint64_t features);

  CDir* add_replica_dir(bufferlist::iterator& p, CInode *diri, mds_rank_t from, list<MDSInternalContextBase*>& finished);
  CDentry *add_replica_dentry(bufferlist::iterator& p, CDir *dir, list<MDSInternalContextBase*>& finished);
  CInode *add_replica_inode(bufferlist::iterator& p, CDentry *dn, list<MDSInternalContextBase*>& finished);

  void replicate_stray(CDentry *straydn, mds_rank_t who, bufferlist& bl);
  CDentry *add_replica_stray(bufferlist &bl, mds_rank_t from);

  // -- namespace --
public:
  void send_dentry_link(CDentry *dn, MDRequestRef& mdr);
  void send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& mdr);
protected:
  void handle_dentry_link(MDentryLink *m);
  void handle_dentry_unlink(MDentryUnlink *m);


  // -- fragmenting --
private:
  struct ufragment {
    int bits;
    bool committed;
    LogSegment *ls;
    list<MDSInternalContextBase*> waiters;
    list<frag_t> old_frags;
    bufferlist rollback;
    ufragment() : bits(0), committed(false), ls(NULL) {}
  };
  map<dirfrag_t, ufragment> uncommitted_fragments;

  struct fragment_info_t {
    int bits;
    list<CDir*> dirs;
    list<CDir*> resultfrags;
    MDRequestRef mdr;
    // for deadlock detection
    bool all_frozen;
    utime_t last_cum_auth_pins_change;
    int last_cum_auth_pins;
    int num_remote_waiters;  // number of remote authpin waiters
    fragment_info_t() : bits(0), all_frozen(false), last_cum_auth_pins(0), num_remote_waiters(0) {}
    bool is_fragmenting() { return !resultfrags.empty(); }
  };
  map<dirfrag_t,fragment_info_t> fragments;

  void adjust_dir_fragments(CInode *diri, frag_t basefrag, int bits,
                            list<CDir*>& frags, list<MDSInternalContextBase*>& waiters, bool replay);
  void adjust_dir_fragments(CInode *diri,
                            list<CDir*>& srcfrags,
                            frag_t basefrag, int bits,
                            list<CDir*>& resultfrags,
                            list<MDSInternalContextBase*>& waiters,
                            bool replay);
  CDir *force_dir_fragment(CInode *diri, frag_t fg, bool replay=true);
  void get_force_dirfrag_bound_set(vector<dirfrag_t>& dfs, set<CDir*>& bounds);

  bool can_fragment(CInode *diri, list<CDir*>& dirs);
  void fragment_freeze_dirs(list<CDir*>& dirs);
  void fragment_mark_and_complete(MDRequestRef& mdr);
  void fragment_frozen(MDRequestRef& mdr, int r);
  void fragment_unmark_unfreeze_dirs(list<CDir*>& dirs);
  void dispatch_fragment_dir(MDRequestRef& mdr);
  void _fragment_logged(MDRequestRef& mdr);
  void _fragment_stored(MDRequestRef& mdr);
  void _fragment_committed(dirfrag_t f, list<CDir*>& resultfrags);
  void _fragment_finish(dirfrag_t f, list<CDir*>& resultfrags);

  friend class EFragment;
  friend class C_MDC_FragmentFrozen;
  friend class C_MDC_FragmentMarking;
  friend class C_MDC_FragmentPrep;
  friend class C_MDC_FragmentStore;
  friend class C_MDC_FragmentCommit;
  friend class C_IO_MDC_FragmentFinish;

  void handle_fragment_notify(MMDSFragmentNotify *m);

  void add_uncommitted_fragment(dirfrag_t basedirfrag, int bits, list<frag_t>& old_frag,
                                LogSegment *ls, bufferlist *rollback=NULL);
  void finish_uncommitted_fragment(dirfrag_t basedirfrag, int op);
  void rollback_uncommitted_fragment(dirfrag_t basedirfrag, list<frag_t>& old_frags);
public:
  void wait_for_uncommitted_fragment(dirfrag_t dirfrag, MDSInternalContextBase *c) {
    assert(uncommitted_fragments.count(dirfrag));
    uncommitted_fragments[dirfrag].waiters.push_back(c);
  }
  void split_dir(CDir *dir, int byn);
  void merge_dir(CInode *diri, frag_t fg);
  void rollback_uncommitted_fragments();

  void find_stale_fragment_freeze();
  void fragment_freeze_inc_num_waiters(CDir *dir);
  bool fragment_are_all_frozen(CDir *dir);
  int get_num_fragmenting_dirs() { return fragments.size(); }

  // -- updates --
  //int send_inode_updates(CInode *in);
  //void handle_inode_update(MInodeUpdate *m);

  int send_dir_updates(CDir *in, bool bcast=false);
  void handle_dir_update(MDirUpdate *m);

  // -- cache expiration --
  void handle_cache_expire(MCacheExpire *m);
  void process_delayed_expire(CDir *dir);
  void discard_delayed_expire(CDir *dir);

protected:
  int dump_cache(boost::string_view fn, Formatter *f,
                 boost::string_view dump_root = "",
                 int depth = -1);
public:
  int dump_cache() { return dump_cache(NULL, NULL); }
  int dump_cache(boost::string_view filename);
  int dump_cache(Formatter *f);
  int dump_cache(boost::string_view dump_root, int depth, Formatter *f);

  int cache_status(Formatter *f);

  void dump_resolve_status(Formatter *f) const;
  void dump_rejoin_status(Formatter *f) const;

  // == crap fns ==
public:
  void show_cache();
  void show_subtrees(int dbl=10);

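  // Debug-only helper: picks a uniformly random cached inode by linearly
  // advancing an iterator, so it costs O(n) in the number of cached
  // inodes -- fine for testing, not for any hot path.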
  CInode *hack_pick_random_inode() {
    assert(!inode_map.empty());
    int n = rand() % inode_map.size();
    auto p = inode_map.begin();
    while (n--) ++p;
    return p->second;
  }

protected:
  void flush_dentry_work(MDRequestRef& mdr);
  /**
   * Resolve path to a dentry and pass it onto the ScrubStack.
   *
   * TODO: return enough information to the original mdr formatter
   * and completion that they can subsequently check the progress of
   * this scrub (we won't block them on a whole scrub as it can take a very
   * long time)
   */
  void enqueue_scrub_work(MDRequestRef& mdr);
  void repair_inode_stats_work(MDRequestRef& mdr);
  void repair_dirfrag_stats_work(MDRequestRef& mdr);
  friend class C_MDC_RepairDirfragStats;
public:
  void flush_dentry(boost::string_view path, Context *fin);
  /**
   * Create and start an OP_ENQUEUE_SCRUB
   */
  void enqueue_scrub(boost::string_view path, boost::string_view tag,
                     bool force, bool recursive, bool repair,
                     Formatter *f, Context *fin);
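
  // Illustrative sketch (hypothetical invocation, not from the original
  // source): kicking off a recursive, repairing scrub from an admin
  // command handler, with 'f' and 'fin' supplied by the command machinery:
  //
  //   enqueue_scrub("/some/path", "my-scrub-tag",
  //                 false /* force */, true /* recursive */,
  //                 true /* repair */, f, fin);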
  void repair_inode_stats(CInode *diri);
  void repair_dirfrag_stats(CDir *dir);

public:
  /* Because exports may fail, this set lets us keep track of inodes that need exporting. */
  std::set<CInode *> export_pin_queue;
};

class C_MDS_RetryRequest : public MDSInternalContext {
  MDCache *cache;
  MDRequestRef mdr;
public:
  C_MDS_RetryRequest(MDCache *c, MDRequestRef& r);
  void finish(int r) override;
};

#endif