// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */


#ifndef CEPH_MDCACHE_H
#define CEPH_MDCACHE_H

#include "include/types.h"
#include "include/filepath.h"
#include "include/elist.h"

#include "osdc/Filer.h"
#include "CInode.h"
#include "CDentry.h"
#include "CDir.h"
#include "include/Context.h"
#include "events/EMetaBlob.h"
#include "RecoveryQueue.h"
#include "StrayManager.h"
#include "MDSContext.h"
#include "MDSMap.h"
#include "Mutation.h"

#include "messages/MClientRequest.h"
#include "messages/MMDSSlaveRequest.h"

class PerfCounters;

class MDSRank;
class Session;
class Migrator;

class Message;

class MMDSResolve;
class MMDSResolveAck;
class MMDSCacheRejoin;
class MDiscover;
class MDiscoverReply;
class MCacheExpire;
class MDirUpdate;
class MDentryLink;
class MDentryUnlink;
class MLock;
struct MMDSFindIno;
struct MMDSFindInoReply;
struct MMDSOpenIno;
struct MMDSOpenInoReply;

class MClientRequest;
class MMDSSlaveRequest;
struct MClientSnap;

class MMDSFragmentNotify;

class ESubtreeMap;

enum {
  l_mdc_first = 3000,
  // How many inodes currently in stray dentries
  l_mdc_num_strays,
  // How many stray dentries are currently delayed for purge due to refs
  l_mdc_num_strays_delayed,
  // How many stray dentries are currently being enqueued for purge
  l_mdc_num_strays_enqueuing,

  // How many dentries have ever been added to stray dir
  l_mdc_strays_created,
  // How many dentries have been passed on to PurgeQueue
  l_mdc_strays_enqueued,
  // How many strays have been reintegrated
  l_mdc_strays_reintegrated,
  // How many strays have been migrated
  l_mdc_strays_migrated,

  // How many inode sizes are currently being recovered
  l_mdc_num_recovering_processing,
  // How many inodes are currently waiting to have their size recovered
  l_mdc_num_recovering_enqueued,
  // How many inodes are waiting with elevated priority for recovery
  l_mdc_num_recovering_prioritized,
  // How many inodes have ever started size recovery
  l_mdc_recovery_started,
  // How many inodes have ever completed size recovery
  l_mdc_recovery_completed,

  l_mdss_ireq_enqueue_scrub,
  l_mdss_ireq_exportdir,
  l_mdss_ireq_flush,
  l_mdss_ireq_fragmentdir,
  l_mdss_ireq_fragstats,
  l_mdss_ireq_inodestats,

  l_mdc_last,
};


// flags for predirty_journal_parents()
static const int PREDIRTY_PRIMARY = 1; // primary dn, adjust nested accounting
static const int PREDIRTY_DIR = 2;     // update parent dir mtime/size
static const int PREDIRTY_SHALLOW = 4; // only go to immediate parent (for easier rollback)
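
// Illustrative sketch (not a verbatim call site from this codebase): a caller
// journaling a client write that dirties both an inode and its parent
// directory would typically OR the first two flags together, e.g.
//
//   mdcache->predirty_journal_parents(mut, &le->metablob, in,
//                                     in->get_parent_dir(),
//                                     PREDIRTY_PRIMARY|PREDIRTY_DIR);
//
// where 'mut' and 'le' stand in for the caller's MutationRef and log event.
// PREDIRTY_SHALLOW would be added to stop the rstat projection at the
// immediate parent, keeping a later rollback cheap.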

class MDCache {
public:
  // my master
  MDSRank *mds;

  // -- my cache --
  LRU lru;        // dentry lru for expiring items from cache
  LRU bottom_lru; // dentries that should be trimmed ASAP
protected:
  ceph::unordered_map<vinodeno_t,CInode*> inode_map;  // map of inodes by ino
  CInode *root;  // root inode
  CInode *myin;  // .ceph/mds%d dir

  bool readonly;
  void set_readonly() { readonly = true; }

  CInode *strays[NUM_STRAY];  // my stray dir
  int stray_index;

  CInode *get_stray() {
    return strays[stray_index];
  }

  set<CInode*> base_inodes;

  std::unique_ptr<PerfCounters> logger;

  Filer filer;

  bool exceeded_size_limit;

public:
  void advance_stray() {
    stray_index = (stray_index+1)%NUM_STRAY;
  }

  void activate_stray_manager();

  /**
   * Call this when you know that a CDentry is ready to be passed
   * on to StrayManager (i.e. this is a stray you've just created)
   */
  void notify_stray(CDentry *dn) {
    assert(dn->get_dir()->get_inode()->is_stray());
    stray_manager.eval_stray(dn);
  }
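
  // Illustrative usage (a sketch, not a real call site): after relinking an
  // unlinked inode's primary dentry into the stray directory, the new stray
  // dentry is handed straight to the StrayManager:
  //
  //   CDentry *straydn = mdcache->get_or_create_stray_dentry(in);
  //   // ... link the inode under straydn in the journaled operation ...
  //   mdcache->notify_stray(straydn);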

  void maybe_eval_stray(CInode *in, bool delay=false);
  void clear_dirty_bits_for_stray(CInode* diri);

  bool is_readonly() { return readonly; }
  void force_readonly();

  DecayRate decayrate;

  int num_inodes_with_caps;

  unsigned max_dir_commit_size;

  static file_layout_t gen_default_file_layout(const MDSMap &mdsmap);
  static file_layout_t gen_default_log_layout(const MDSMap &mdsmap);

  file_layout_t default_file_layout;
  file_layout_t default_log_layout;

  void register_perfcounters();

  // -- client leases --
public:
  static const int client_lease_pools = 3;
  float client_lease_durations[client_lease_pools];
protected:
  xlist<ClientLease*> client_leases[client_lease_pools];
public:
  void touch_client_lease(ClientLease *r, int pool, utime_t ttl) {
    client_leases[pool].push_back(&r->item_lease);
    r->ttl = ttl;
  }

  void notify_stray_removed()
  {
    stray_manager.notify_stray_removed();
  }

  void notify_stray_created()
  {
    stray_manager.notify_stray_created();
  }

  void eval_remote(CDentry *dn)
  {
    stray_manager.eval_remote(dn);
  }

  // -- client caps --
  uint64_t last_cap_id;



  // -- discover --
  struct discover_info_t {
    ceph_tid_t tid;
    mds_rank_t mds;
    inodeno_t ino;
    frag_t frag;
    snapid_t snap;
    filepath want_path;
    CInode *basei;
    bool want_base_dir;
    bool want_xlocked;

    discover_info_t() :
      tid(0), mds(-1), snap(CEPH_NOSNAP), basei(NULL),
      want_base_dir(false), want_xlocked(false) {}
    ~discover_info_t() {
      if (basei)
        basei->put(MDSCacheObject::PIN_DISCOVERBASE);
    }
    void pin_base(CInode *b) {
      basei = b;
      basei->get(MDSCacheObject::PIN_DISCOVERBASE);
    }
  };

  map<ceph_tid_t, discover_info_t> discovers;
  ceph_tid_t discover_last_tid;

  void _send_discover(discover_info_t& dis);
  discover_info_t& _create_discover(mds_rank_t mds) {
    ceph_tid_t t = ++discover_last_tid;
    discover_info_t& d = discovers[t];
    d.tid = t;
    d.mds = mds;
    return d;
  }

  // waiters
  map<int, map<inodeno_t, list<MDSInternalContextBase*> > > waiting_for_base_ino;

  void discover_base_ino(inodeno_t want_ino, MDSInternalContextBase *onfinish,
                         mds_rank_t from=MDS_RANK_NONE);
  void discover_dir_frag(CInode *base, frag_t approx_fg, MDSInternalContextBase *onfinish,
                         mds_rank_t from=MDS_RANK_NONE);
  void discover_path(CInode *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
                     bool want_xlocked=false, mds_rank_t from=MDS_RANK_NONE);
  void discover_path(CDir *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
                     bool want_xlocked=false);
  void kick_discovers(mds_rank_t who);  // after a failure.
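
  // Illustrative sketch (hypothetical caller): to replicate a path whose
  // authority is another rank, queue a retry waiter and let the discover
  // machinery re-dispatch the request once the replicas arrive:
  //
  //   mdcache->discover_path(base, CEPH_NOSNAP, filepath("a/b/c"),
  //                          new C_MDS_RetryRequest(mdcache, mdr));
  //
  // Each in-flight discover gets a tid via _create_discover() and is resent
  // by kick_discovers() if the target MDS fails.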


  // -- subtrees --
protected:
  /* subtree keys and each tree's non-recursive nested subtrees (the "bounds") */
  map<CDir*,set<CDir*> > subtrees;
  map<CInode*,list<pair<CDir*,CDir*> > > projected_subtree_renames;  // renamed ino -> target dir

  // adjust subtree auth specification
  //  dir->dir_auth
  //  imports/exports/nested_exports
  //  join/split subtrees as appropriate
public:
  bool is_subtrees() { return !subtrees.empty(); }
  void list_subtrees(list<CDir*>& ls);
  void adjust_subtree_auth(CDir *root, mds_authority_t auth);
  void adjust_subtree_auth(CDir *root, mds_rank_t a, mds_rank_t b=CDIR_AUTH_UNKNOWN) {
    adjust_subtree_auth(root, mds_authority_t(a,b));
  }
  void adjust_bounded_subtree_auth(CDir *dir, set<CDir*>& bounds, mds_authority_t auth);
  void adjust_bounded_subtree_auth(CDir *dir, set<CDir*>& bounds, mds_rank_t a) {
    adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
  }
  void adjust_bounded_subtree_auth(CDir *dir, vector<dirfrag_t>& bounds, mds_authority_t auth);
  void adjust_bounded_subtree_auth(CDir *dir, vector<dirfrag_t>& bounds, mds_rank_t a) {
    adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
  }
  void map_dirfrag_set(list<dirfrag_t>& dfs, set<CDir*>& result);
  void try_subtree_merge(CDir *root);
  void try_subtree_merge_at(CDir *root, set<CInode*> *to_eval);
  void subtree_merge_writebehind_finish(CInode *in, MutationRef& mut);
  void eval_subtree_root(CInode *diri);
  CDir *get_subtree_root(CDir *dir);
  CDir *get_projected_subtree_root(CDir *dir);
  bool is_leaf_subtree(CDir *dir) {
    assert(subtrees.count(dir));
    return subtrees[dir].empty();
  }
  void remove_subtree(CDir *dir);
  bool is_subtree(CDir *root) {
    return subtrees.count(root);
  }
  void get_subtree_bounds(CDir *root, set<CDir*>& bounds);
  void get_wouldbe_subtree_bounds(CDir *root, set<CDir*>& bounds);
  void verify_subtree_bounds(CDir *root, const set<CDir*>& bounds);
  void verify_subtree_bounds(CDir *root, const list<dirfrag_t>& bounds);

  void project_subtree_rename(CInode *diri, CDir *olddir, CDir *newdir);
  void adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop);

  void get_auth_subtrees(set<CDir*>& s);
  void get_fullauth_subtrees(set<CDir*>& s);

  int num_subtrees();
  int num_subtrees_fullauth();
  int num_subtrees_fullnonauth();


protected:
  // delayed cache expire
  map<CDir*, map<mds_rank_t, MCacheExpire*> > delayed_expire;  // subtree root -> expire msg


  // -- requests --
  ceph::unordered_map<metareqid_t, MDRequestRef> active_requests;

public:
  int get_num_client_requests();

  MDRequestRef request_start(MClientRequest *req);
  MDRequestRef request_start_slave(metareqid_t rid, __u32 attempt, Message *m);
  MDRequestRef request_start_internal(int op);
  bool have_request(metareqid_t rid) {
    return active_requests.count(rid);
  }
  MDRequestRef request_get(metareqid_t rid);
  void request_pin_ref(MDRequestRef& r, CInode *ref, vector<CDentry*>& trace);
  void request_finish(MDRequestRef& mdr);
  void request_forward(MDRequestRef& mdr, mds_rank_t mds, int port=0);
  void dispatch_request(MDRequestRef& mdr);
  void request_drop_foreign_locks(MDRequestRef& mdr);
  void request_drop_non_rdlocks(MDRequestRef& r);
  void request_drop_locks(MDRequestRef& r);
  void request_cleanup(MDRequestRef& r);

  void request_kill(MDRequestRef& r);  // called when session closes

  // journal/snap helpers
  CInode *pick_inode_snap(CInode *in, snapid_t follows);
  CInode *cow_inode(CInode *in, snapid_t last);
  void journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, CDentry *dn,
                          snapid_t follows=CEPH_NOSNAP,
                          CInode **pcow_inode=0, CDentry::linkage_t *dnl=0);
  void journal_cow_inode(MutationRef& mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP,
                         CInode **pcow_inode=0);
  void journal_dirty_inode(MutationImpl *mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP);

  void project_rstat_inode_to_frag(CInode *cur, CDir *parent, snapid_t first,
                                   int linkunlink, SnapRealm *prealm);
  void _project_rstat_inode_to_frag(inode_t& inode, snapid_t ofirst, snapid_t last,
                                    CDir *parent, int linkunlink, bool update_inode);
  void project_rstat_frag_to_inode(nest_info_t& rstat, nest_info_t& accounted_rstat,
                                   snapid_t ofirst, snapid_t last,
                                   CInode *pin, bool cow_head);
  void broadcast_quota_to_client(CInode *in);
  void predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
                                CInode *in, CDir *parent,
                                int flags, int linkunlink=0,
                                snapid_t follows=CEPH_NOSNAP);

  // slaves
  void add_uncommitted_master(metareqid_t reqid, LogSegment *ls, set<mds_rank_t> &slaves, bool safe=false) {
    uncommitted_masters[reqid].ls = ls;
    uncommitted_masters[reqid].slaves = slaves;
    uncommitted_masters[reqid].safe = safe;
  }
  void wait_for_uncommitted_master(metareqid_t reqid, MDSInternalContextBase *c) {
    uncommitted_masters[reqid].waiters.push_back(c);
  }
  bool have_uncommitted_master(metareqid_t reqid, mds_rank_t from) {
    auto p = uncommitted_masters.find(reqid);
    return p != uncommitted_masters.end() && p->second.slaves.count(from) > 0;
  }
  void log_master_commit(metareqid_t reqid);
  void logged_master_update(metareqid_t reqid);
  void _logged_master_commit(metareqid_t reqid);
  void committed_master_slave(metareqid_t r, mds_rank_t from);
  void finish_committed_masters();

  void _logged_slave_commit(mds_rank_t from, metareqid_t reqid);

  // -- recovery --
protected:
  set<mds_rank_t> recovery_set;

public:
  void set_recovery_set(set<mds_rank_t>& s);
  void handle_mds_failure(mds_rank_t who);
  void handle_mds_recovery(mds_rank_t who);

protected:
  // [resolve]
  // from EImportStart w/o EImportFinish during journal replay
  map<dirfrag_t, vector<dirfrag_t> > my_ambiguous_imports;
  // from MMDSResolves
  map<mds_rank_t, map<dirfrag_t, vector<dirfrag_t> > > other_ambiguous_imports;

  map<mds_rank_t, map<metareqid_t, MDSlaveUpdate*> > uncommitted_slave_updates;  // slave: for replay.
  map<CInode*, int> uncommitted_slave_rename_olddir;  // slave: preserve the non-auth dir until seeing commit.
  map<CInode*, int> uncommitted_slave_unlink;  // slave: preserve the unlinked inode until seeing commit.

  // track master requests whose slaves haven't acknowledged commit
  struct umaster {
    set<mds_rank_t> slaves;
    LogSegment *ls;
    list<MDSInternalContextBase*> waiters;
    bool safe;
    bool committing;
    bool recovering;
    umaster() : ls(NULL), safe(false), committing(false), recovering(false) {}
  };
  map<metareqid_t, umaster> uncommitted_masters;  // master: req -> slave set

  set<metareqid_t> pending_masters;
  map<int, set<metareqid_t> > ambiguous_slave_updates;

  friend class ESlaveUpdate;
  friend class ECommitted;

  bool resolves_pending;
  set<mds_rank_t> resolve_gather;      // nodes i need resolves from
  set<mds_rank_t> resolve_ack_gather;  // nodes i need a resolve_ack from
  map<metareqid_t, mds_rank_t> need_resolve_rollback;  // rollbacks i'm writing to the journal
  map<mds_rank_t, MMDSResolve*> delayed_resolve;

  void handle_resolve(MMDSResolve *m);
  void handle_resolve_ack(MMDSResolveAck *m);
  void process_delayed_resolve();
  void discard_delayed_resolve(mds_rank_t who);
  void maybe_resolve_finish();
  void disambiguate_my_imports();
  void disambiguate_other_imports();
  void trim_unlinked_inodes();
  void add_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master, MDSlaveUpdate*);
  void finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
  MDSlaveUpdate* get_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
public:
  void recalc_auth_bits(bool replay);
  void remove_inode_recursive(CInode *in);

  bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    auto p = ambiguous_slave_updates.find(master);
    return p != ambiguous_slave_updates.end() && p->second.count(reqid);
  }
  void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    ambiguous_slave_updates[master].insert(reqid);
  }
  void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    auto p = ambiguous_slave_updates.find(master);
    assert(p != ambiguous_slave_updates.end());  // caller must have added it
    auto q = p->second.find(reqid);
    assert(q != p->second.end());
    p->second.erase(q);
    if (p->second.empty())
      ambiguous_slave_updates.erase(p);
  }

  void add_rollback(metareqid_t reqid, mds_rank_t master) {
    need_resolve_rollback[reqid] = master;
  }
  void finish_rollback(metareqid_t reqid);

  // ambiguous imports
  void add_ambiguous_import(dirfrag_t base, const vector<dirfrag_t>& bounds);
  void add_ambiguous_import(CDir *base, const set<CDir*>& bounds);
  bool have_ambiguous_import(dirfrag_t base) {
    return my_ambiguous_imports.count(base);
  }
  void get_ambiguous_import_bounds(dirfrag_t base, vector<dirfrag_t>& bounds) {
    assert(my_ambiguous_imports.count(base));
    bounds = my_ambiguous_imports[base];
  }
  void cancel_ambiguous_import(CDir *);
  void finish_ambiguous_import(dirfrag_t dirino);
  void resolve_start(MDSInternalContext *resolve_done_);
  void send_resolves();
  void send_slave_resolves();
  void send_subtree_resolves();
  void maybe_send_pending_resolves() {
    if (resolves_pending)
      send_subtree_resolves();
  }

  void _move_subtree_map_bound(dirfrag_t df, dirfrag_t oldparent, dirfrag_t newparent,
                               map<dirfrag_t,vector<dirfrag_t> >& subtrees);
  ESubtreeMap *create_subtree_map();


  void clean_open_file_lists();

protected:
  // [rejoin]
  bool rejoins_pending;
  set<mds_rank_t> rejoin_gather;      // nodes from whom i need a rejoin
  set<mds_rank_t> rejoin_sent;        // nodes i sent a rejoin to
  set<mds_rank_t> rejoin_ack_sent;    // nodes i sent a rejoin ack to
  set<mds_rank_t> rejoin_ack_gather;  // nodes from whom i need a rejoin ack
  map<mds_rank_t,map<inodeno_t,map<client_t,Capability::Import> > > rejoin_imported_caps;
  map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > > rejoin_slave_exports;
  map<client_t,entity_inst_t> rejoin_client_map;

  map<inodeno_t,map<client_t,cap_reconnect_t> > cap_exports;  // ino -> client -> capex
  map<inodeno_t,mds_rank_t> cap_export_targets;               // ino -> auth mds

  map<inodeno_t,map<client_t,map<mds_rank_t,cap_reconnect_t> > > cap_imports;  // ino -> client -> frommds -> capex
  set<inodeno_t> cap_imports_missing;
  map<inodeno_t, list<MDSInternalContextBase*> > cap_reconnect_waiters;
  int cap_imports_num_opening;

  set<CInode*> rejoin_undef_inodes;
  set<CInode*> rejoin_potential_updated_scatterlocks;
  set<CDir*> rejoin_undef_dirfrags;
  map<mds_rank_t, set<CInode*> > rejoin_unlinked_inodes;

  vector<CInode*> rejoin_recover_q, rejoin_check_q;
  list<SimpleLock*> rejoin_eval_locks;
  list<MDSInternalContextBase*> rejoin_waiters;

  void rejoin_walk(CDir *dir, MMDSCacheRejoin *rejoin);
  void handle_cache_rejoin(MMDSCacheRejoin *m);
  void handle_cache_rejoin_weak(MMDSCacheRejoin *m);
  CInode* rejoin_invent_inode(inodeno_t ino, snapid_t last);
  CDir* rejoin_invent_dirfrag(dirfrag_t df);
  void handle_cache_rejoin_strong(MMDSCacheRejoin *m);
  void rejoin_scour_survivor_replicas(mds_rank_t from, MMDSCacheRejoin *ack,
                                      set<vinodeno_t>& acked_inodes,
                                      set<SimpleLock *>& gather_locks);
  void handle_cache_rejoin_ack(MMDSCacheRejoin *m);
  void rejoin_send_acks();
  void rejoin_trim_undef_inodes();
  void maybe_send_pending_rejoins() {
    if (rejoins_pending)
      rejoin_send_rejoins();
  }
  std::unique_ptr<MDSInternalContext> rejoin_done;
  std::unique_ptr<MDSInternalContext> resolve_done;
public:
  void rejoin_start(MDSInternalContext *rejoin_done_);
  void rejoin_gather_finish();
  void rejoin_send_rejoins();
  void rejoin_export_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
                          int target=-1) {
    cap_exports[ino][client] = icr;
    cap_export_targets[ino] = target;
  }
  void rejoin_recovered_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
                             mds_rank_t frommds=MDS_RANK_NONE) {
    cap_imports[ino][client][frommds] = icr;
  }
  const cap_reconnect_t *get_replay_cap_reconnect(inodeno_t ino, client_t client) {
    if (cap_imports.count(ino) &&
        cap_imports[ino].count(client) &&
        cap_imports[ino][client].count(MDS_RANK_NONE)) {
      return &cap_imports[ino][client][MDS_RANK_NONE];
    }
    return NULL;
  }
  void remove_replay_cap_reconnect(inodeno_t ino, client_t client) {
    assert(cap_imports[ino].size() == 1);
    assert(cap_imports[ino][client].size() == 1);
    cap_imports.erase(ino);
  }
  void wait_replay_cap_reconnect(inodeno_t ino, MDSInternalContextBase *c) {
    cap_reconnect_waiters[ino].push_back(c);
  }

  // [reconnect/rejoin caps]
  struct reconnected_cap_info_t {
    inodeno_t realm_ino;
    snapid_t snap_follows;
    int dirty_caps;
    reconnected_cap_info_t() :
      realm_ino(0), snap_follows(0), dirty_caps(0) {}
  };
  map<inodeno_t,map<client_t, reconnected_cap_info_t> > reconnected_caps;  // inode -> client -> snap_follows,realmino
  map<inodeno_t,map<client_t, snapid_t> > reconnected_snaprealms;          // realmino -> client -> realmseq

  void add_reconnected_cap(client_t client, inodeno_t ino, const cap_reconnect_t& icr) {
    reconnected_cap_info_t &info = reconnected_caps[ino][client];
    info.realm_ino = inodeno_t(icr.capinfo.snaprealm);
    info.snap_follows = icr.snap_follows;
  }
  void set_reconnected_dirty_caps(client_t client, inodeno_t ino, int dirty) {
    reconnected_cap_info_t &info = reconnected_caps[ino][client];
    info.dirty_caps |= dirty;
  }
  void add_reconnected_snaprealm(client_t client, inodeno_t ino, snapid_t seq) {
    reconnected_snaprealms[ino][client] = seq;
  }

  friend class C_MDC_RejoinOpenInoFinish;
  friend class C_MDC_RejoinSessionsOpened;
  void rejoin_open_ino_finish(inodeno_t ino, int ret);
  void rejoin_open_sessions_finish(map<client_t,entity_inst_t> client_map,
                                   map<client_t,uint64_t>& sseqmap);
  bool process_imported_caps();
  void choose_lock_states_and_reconnect_caps();
  void prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t ino,
                           map<client_t,MClientSnap*>& splits);
  void do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool nosend=false);
  void send_snaps(map<client_t,MClientSnap*>& splits);
  Capability* rejoin_import_cap(CInode *in, client_t client, const cap_reconnect_t& icr, mds_rank_t frommds);
  void finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snapid_t seq);
  void try_reconnect_cap(CInode *in, Session *session);
  void export_remaining_imported_caps();

  // cap imports.  delayed snap parent opens.
  //  realm inode -> client -> cap inodes needing to split to this realm
  map<CInode*,set<CInode*> > missing_snap_parents;
  map<client_t,set<CInode*> > delayed_imported_caps;

  void do_cap_import(Session *session, CInode *in, Capability *cap,
                     uint64_t p_cap_id, ceph_seq_t p_seq, ceph_seq_t p_mseq,
                     int peer, int p_flags);
  void do_delayed_cap_imports();
  void rebuild_need_snapflush(CInode *head_in, SnapRealm *realm, client_t client,
                              snapid_t snap_follows);
  void check_realm_past_parents(SnapRealm *realm, bool reconnect);
  void open_snap_parents();

  bool open_undef_inodes_dirfrags();
  void opened_undef_inode(CInode *in);
  void opened_undef_dirfrag(CDir *dir) {
    rejoin_undef_dirfrags.erase(dir);
  }

  void reissue_all_caps();


  friend class Locker;
  friend class Migrator;
  friend class MDBalancer;

  // StrayManager needs to be able to remove_inode() from us
  // when it is done purging
  friend class StrayManager;

  // File size recovery
private:
  RecoveryQueue recovery_queue;
  void identify_files_to_recover();
public:
  void start_files_to_recover();
  void do_file_recover();
  void queue_file_recover(CInode *in);
  void _queued_file_recover_cow(CInode *in, MutationRef& mut);

  // subsystems
  std::unique_ptr<Migrator> migrator;

public:
  explicit MDCache(MDSRank *m, PurgeQueue &purge_queue_);
  ~MDCache();

  // debug
  void log_stat();

  // root inode
  CInode *get_root() { return root; }
  CInode *get_myin() { return myin; }

  // cache
  void set_cache_size(size_t max) { lru.lru_set_max(max); }
  size_t get_cache_size() { return lru.lru_get_size(); }

  // trimming
  bool trim(int max=-1, int count=-1);  // trim cache
  bool trim_dentry(CDentry *dn, map<mds_rank_t, MCacheExpire*>& expiremap);
  void trim_dirfrag(CDir *dir, CDir *con,
                    map<mds_rank_t, MCacheExpire*>& expiremap);
  bool trim_inode(CDentry *dn, CInode *in, CDir *con,
                  map<mds_rank_t,class MCacheExpire*>& expiremap);
  void send_expire_messages(map<mds_rank_t, MCacheExpire*>& expiremap);
  void trim_non_auth();  // trim out trimmable non-auth items
  bool trim_non_auth_subtree(CDir *directory);
  void standby_trim_segment(LogSegment *ls);
  void try_trim_non_auth_subtree(CDir *dir);
  bool can_trim_non_auth_dirfrag(CDir *dir) {
    return my_ambiguous_imports.count(dir->dirfrag()) == 0 &&
           uncommitted_slave_rename_olddir.count(dir->inode) == 0;
  }

  /**
   * For all unreferenced inodes, dirs, dentries below an inode, compose
   * expiry messages. This is used when giving up all replicas of entities
   * for an MDS peer in the 'stopping' state, such that the peer can
   * empty its cache and finish shutting down.
   *
   * We have to make sure we're only expiring un-referenced items to
   * avoid interfering with ongoing stray-movement (we can't distinguish
   * between the "moving my strays" and "waiting for my cache to empty"
   * phases within 'stopping').
   *
   * @return false if we completed cleanly, true if caller should stop
   * expiring because we hit something with refs.
   */
  bool expire_recursive(
    CInode *in,
    std::map<mds_rank_t, MCacheExpire*>& expiremap);
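
  // Illustrative sketch (hypothetical caller): when a peer enters 'stopping',
  // a clean pass lets us ship the composed expiries; hitting a referenced
  // item means we should stop and retry later:
  //
  //   map<mds_rank_t, MCacheExpire*> expiremap;
  //   if (!mdcache->expire_recursive(in, expiremap))
  //     mdcache->send_expire_messages(expiremap);  // completed cleanly
  //   // else: something still has refs; back off and try again later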

  void trim_client_leases();
  void check_memory_usage();

  utime_t last_recall_state;

  // shutdown
private:
  set<inodeno_t> shutdown_exported_strays;
public:
  void shutdown_start();
  void shutdown_check();
  bool shutdown_pass();
  bool shutdown_export_strays();
  bool shutdown();  // clear cache (i.e. at shutdown)

  bool did_shutdown_log_cap;

  // inode_map
  bool have_inode(vinodeno_t vino) {
    return inode_map.count(vino) ? true : false;
  }
  bool have_inode(inodeno_t ino, snapid_t snap=CEPH_NOSNAP) {
    return have_inode(vinodeno_t(ino, snap));
  }
  CInode* get_inode(vinodeno_t vino) {
    if (have_inode(vino))
      return inode_map[vino];
    return NULL;
  }
  CInode* get_inode(inodeno_t ino, snapid_t s=CEPH_NOSNAP) {
    return get_inode(vinodeno_t(ino, s));
  }
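
  // Illustrative sketch: the head (non-snapshot) version of an inode is keyed
  // by CEPH_NOSNAP, so a plain lookup (hypothetical ino value)
  //
  //   CInode *in = mdcache->get_inode(inodeno_t(0x10000000000));
  //
  // is shorthand for get_inode(vinodeno_t(ino, CEPH_NOSNAP)); snapshotted
  // versions of the same inode live under their own vinodeno_t keys.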

  CDir* get_dirfrag(dirfrag_t df) {
    CInode *in = get_inode(df.ino);
    if (!in)
      return NULL;
    return in->get_dirfrag(df.frag);
  }
  CDir* get_dirfrag(inodeno_t ino, const string& dn) {
    CInode *in = get_inode(ino);
    if (!in)
      return NULL;
    frag_t fg = in->pick_dirfrag(dn);
    return in->get_dirfrag(fg);
  }
  CDir* get_force_dirfrag(dirfrag_t df, bool replay) {
    CInode *diri = get_inode(df.ino);
    if (!diri)
      return NULL;
    CDir *dir = force_dir_fragment(diri, df.frag, replay);
    if (!dir)
      dir = diri->get_dirfrag(df.frag);
    return dir;
  }

  MDSCacheObject *get_object(MDSCacheObjectInfo &info);



public:
  void add_inode(CInode *in);

  void remove_inode(CInode *in);
protected:
  void touch_inode(CInode *in) {
    if (in->get_parent_dn())
      touch_dentry(in->get_projected_parent_dn());
  }
public:
  void touch_dentry(CDentry *dn) {
    if (dn->state_test(CDentry::STATE_BOTTOMLRU)) {
      bottom_lru.lru_midtouch(dn);
    } else {
      if (dn->is_auth())
        lru.lru_touch(dn);
      else
        lru.lru_midtouch(dn);
    }
  }
  void touch_dentry_bottom(CDentry *dn) {
    if (dn->state_test(CDentry::STATE_BOTTOMLRU))
      return;
    lru.lru_bottouch(dn);
  }
protected:

  void inode_remove_replica(CInode *in, mds_rank_t rep, bool rejoin,
                            set<SimpleLock *>& gather_locks);
  void dentry_remove_replica(CDentry *dn, mds_rank_t rep, set<SimpleLock *>& gather_locks);

  void rename_file(CDentry *srcdn, CDentry *destdn);

public:
  // truncate
  void truncate_inode(CInode *in, LogSegment *ls);
  void _truncate_inode(CInode *in, LogSegment *ls);
  void truncate_inode_finish(CInode *in, LogSegment *ls);
  void truncate_inode_logged(CInode *in, MutationRef& mut);

  void add_recovered_truncate(CInode *in, LogSegment *ls);
  void remove_recovered_truncate(CInode *in, LogSegment *ls);
  void start_recovered_truncates();


public:
  CDir *get_auth_container(CDir *in);
  CDir *get_export_container(CDir *dir);
  void find_nested_exports(CDir *dir, set<CDir*>& s);
  void find_nested_exports_under(CDir *import, CDir *dir, set<CDir*>& s);


private:
  bool opening_root, open;
  list<MDSInternalContextBase*> waiting_for_open;

public:
  void init_layouts();
  void create_unlinked_system_inode(CInode *in, inodeno_t ino,
                                    int mode) const;
  CInode *create_system_inode(inodeno_t ino, int mode);
  CInode *create_root_inode();

  void create_empty_hierarchy(MDSGather *gather);
  void create_mydir_hierarchy(MDSGather *gather);

  bool is_open() { return open; }
  void wait_for_open(MDSInternalContextBase *c) {
    waiting_for_open.push_back(c);
  }

  void open_root_inode(MDSInternalContextBase *c);
  void open_root();
  void open_mydir_inode(MDSInternalContextBase *c);
  void populate_mydir();

  void _create_system_file(CDir *dir, const char *name, CInode *in, MDSInternalContextBase *fin);
  void _create_system_file_finish(MutationRef& mut, CDentry *dn,
                                  version_t dpv, MDSInternalContextBase *fin);

  void open_foreign_mdsdir(inodeno_t ino, MDSInternalContextBase *c);
  CDir *get_stray_dir(CInode *in);
  CDentry *get_or_create_stray_dentry(CInode *in);

  MDSInternalContextBase *_get_waiter(MDRequestRef& mdr, Message *req, MDSInternalContextBase *fin);

  /**
   * Find the given dentry (and whether it exists or not), its ancestors,
   * and get them all into memory and usable on this MDS. This function
   * makes a best-effort attempt to load everything; if it needs to
   * go away and do something then it will put the request on a waitlist.
   * It prefers the mdr, then the req, then the fin; at least one of
   * these params must be non-null.
   *
   * @param mdr The MDRequest associated with the path. Can be null.
   * @param req The Message associated with the path. Can be null.
   * @param fin The Context associated with the path. Can be null.
   * @param path The path to traverse to.
   * @param pdnvec Data return parameter -- on success, contains a
   * vector of dentries. On failure, is either empty or contains the
   * full trace of traversable dentries.
   * @param pin Data return parameter -- if successful, points to the inode
   * associated with filepath. If unsuccessful, is null.
   * @param onfail Specifies different lookup failure behaviors. If set to
   * MDS_TRAVERSE_DISCOVERXLOCK, path_traverse will succeed on null
   * dentries (instead of returning -ENOENT). If set to
   * MDS_TRAVERSE_FORWARD, it will forward the request to the auth
   * MDS if that becomes appropriate (i.e. if it doesn't know the contents
   * of a directory). If set to MDS_TRAVERSE_DISCOVER, it
   * will attempt to look up the path from a different MDS (and bring them
   * into its cache as replicas).
   *
   * @returns 0 on success, 1 on "not done yet", 2 on "forwarding", -errno otherwise.
   * If it returns 1, the requester associated with this call has been placed
   * on the appropriate waitlist, and it should unwind itself and back out.
   * If it returns 2 the request has been forwarded, and again the requester
   * should unwind itself and back out.
   */
  int path_traverse(MDRequestRef& mdr, Message *req, MDSInternalContextBase *fin, const filepath& path,
                    vector<CDentry*> *pdnvec, CInode **pin, int onfail);
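
  // Illustrative sketch of how a dispatcher is expected to react to the
  // return codes documented above (hypothetical caller, names from this
  // header):
  //
  //   vector<CDentry*> trace;
  //   CInode *in = NULL;
  //   int r = mdcache->path_traverse(mdr, NULL, NULL, path, &trace, &in,
  //                                  MDS_TRAVERSE_FORWARD);
  //   if (r > 0)
  //     return;        // 1 = waiting, 2 = forwarded; unwind and back out
  //   if (r < 0) {
  //     /* reply to the client with error r, e.g. -ENOENT */
  //   }
  //   // r == 0: 'trace' holds the dentries and 'in' the target inode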

  CInode *cache_traverse(const filepath& path);

  void open_remote_dirfrag(CInode *diri, frag_t fg, MDSInternalContextBase *fin);
  CInode *get_dentry_inode(CDentry *dn, MDRequestRef& mdr, bool projected=false);

  bool parallel_fetch(map<inodeno_t,filepath>& pathmap, set<inodeno_t>& missing);
  bool parallel_fetch_traverse_dir(inodeno_t ino, filepath& path,
                                   set<CDir*>& fetch_queue, set<inodeno_t>& missing,
                                   C_GatherBuilder &gather_bld);

  void open_remote_dentry(CDentry *dn, bool projected, MDSInternalContextBase *fin,
                          bool want_xlocked=false);
  void _open_remote_dentry_finish(CDentry *dn, inodeno_t ino, MDSInternalContextBase *fin,
                                  bool want_xlocked, int r);

  void make_trace(vector<CDentry*>& trace, CInode *in);

protected:
  struct open_ino_info_t {
    vector<inode_backpointer_t> ancestors;
    set<mds_rank_t> checked;
    mds_rank_t checking;
    mds_rank_t auth_hint;
    bool check_peers;
    bool fetch_backtrace;
    bool discover;
    bool want_replica;
    bool want_xlocked;
    version_t tid;
    int64_t pool;
    int last_err;
    list<MDSInternalContextBase*> waiters;
    open_ino_info_t() : checking(MDS_RANK_NONE), auth_hint(MDS_RANK_NONE),
      check_peers(true), fetch_backtrace(true), discover(false),
      want_replica(false), want_xlocked(false), tid(0), pool(-1),
      last_err(0) {}
  };
  ceph_tid_t open_ino_last_tid;
  map<inodeno_t,open_ino_info_t> opening_inodes;

  void _open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err);
  void _open_ino_parent_opened(inodeno_t ino, int ret);
  void _open_ino_traverse_dir(inodeno_t ino, open_ino_info_t& info, int err);
  void _open_ino_fetch_dir(inodeno_t ino, MMDSOpenIno *m, CDir *dir, bool parent);
  int open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
                            vector<inode_backpointer_t>& ancestors,
                            bool discover, bool want_xlocked, mds_rank_t *hint);
  void open_ino_finish(inodeno_t ino, open_ino_info_t& info, int err);
  void do_open_ino(inodeno_t ino, open_ino_info_t& info, int err);
  void do_open_ino_peer(inodeno_t ino, open_ino_info_t& info);
  void handle_open_ino(MMDSOpenIno *m, int err=0);
  void handle_open_ino_reply(MMDSOpenInoReply *m);
  friend class C_IO_MDC_OpenInoBacktraceFetched;
  friend struct C_MDC_OpenInoTraverseDir;
  friend struct C_MDC_OpenInoParentOpened;

public:
  void kick_open_ino_peers(mds_rank_t who);
  void open_ino(inodeno_t ino, int64_t pool, MDSInternalContextBase *fin,
                bool want_replica=true, bool want_xlocked=false);

  // -- find_ino_peer --
  struct find_ino_peer_info_t {
    inodeno_t ino;
    ceph_tid_t tid;
    MDSInternalContextBase *fin;
    mds_rank_t hint;
    mds_rank_t checking;
    set<mds_rank_t> checked;

    find_ino_peer_info_t() : tid(0), fin(NULL), hint(MDS_RANK_NONE), checking(MDS_RANK_NONE) {}
  };

  map<ceph_tid_t, find_ino_peer_info_t> find_ino_peer;
  ceph_tid_t find_ino_peer_last_tid;

  void find_ino_peers(inodeno_t ino, MDSInternalContextBase *c, mds_rank_t hint=MDS_RANK_NONE);
  void _do_find_ino_peer(find_ino_peer_info_t& fip);
  void handle_find_ino(MMDSFindIno *m);
  void handle_find_ino_reply(MMDSFindInoReply *m);
  void kick_find_ino_peers(mds_rank_t who);

  // -- snaprealms --
public:
  void snaprealm_create(MDRequestRef& mdr, CInode *in);
  void _snaprealm_create_finish(MDRequestRef& mdr, MutationRef& mut, CInode *in);

  // -- stray --
public:
  void fetch_backtrace(inodeno_t ino, int64_t pool, bufferlist& bl, Context *fin);
  uint64_t get_num_strays() const { return stray_manager.get_num_strays(); }

protected:
  void scan_stray_dir(dirfrag_t next=dirfrag_t());
  StrayManager stray_manager;
  friend struct C_MDC_RetryScanStray;
  friend class C_IO_MDC_FetchedBacktrace;

  // == messages ==
public:
  void dispatch(Message *m);

protected:
  // -- replicas --
  void handle_discover(MDiscover *dis);
  void handle_discover_reply(MDiscoverReply *m);
  friend class C_MDC_Join;

public:
  void replicate_dir(CDir *dir, mds_rank_t to, bufferlist& bl) {
    dirfrag_t df = dir->dirfrag();
    ::encode(df, bl);
    dir->encode_replica(to, bl);
  }
  void replicate_dentry(CDentry *dn, mds_rank_t to, bufferlist& bl) {
    ::encode(dn->name, bl);
    ::encode(dn->last, bl);
    dn->encode_replica(to, bl);
  }
  void replicate_inode(CInode *in, mds_rank_t to, bufferlist& bl,
                       uint64_t features) {
    ::encode(in->inode.ino, bl);  // bleh, minor asymmetry here
    ::encode(in->last, bl);
    in->encode_replica(to, bl, features);
  }

  CDir* add_replica_dir(bufferlist::iterator& p, CInode *diri, mds_rank_t from, list<MDSInternalContextBase*>& finished);
  CDentry *add_replica_dentry(bufferlist::iterator& p, CDir *dir, list<MDSInternalContextBase*>& finished);
  CInode *add_replica_inode(bufferlist::iterator& p, CDentry *dn, list<MDSInternalContextBase*>& finished);

  void replicate_stray(CDentry *straydn, mds_rank_t who, bufferlist& bl);
  CDentry *add_replica_stray(bufferlist &bl, mds_rank_t from);

  // -- namespace --
public:
  void send_dentry_link(CDentry *dn, MDRequestRef& mdr);
  void send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& mdr);
protected:
  void handle_dentry_link(MDentryLink *m);
  void handle_dentry_unlink(MDentryUnlink *m);


  // -- fragmenting --
private:
  struct ufragment {
    int bits;
    bool committed;
    LogSegment *ls;
    list<MDSInternalContextBase*> waiters;
    list<frag_t> old_frags;
    bufferlist rollback;
    ufragment() : bits(0), committed(false), ls(NULL) {}
  };
  map<dirfrag_t, ufragment> uncommitted_fragments;

  struct fragment_info_t {
    int bits;
    list<CDir*> dirs;
    list<CDir*> resultfrags;
    MDRequestRef mdr;
    // for deadlock detection
    bool all_frozen;
    utime_t last_cum_auth_pins_change;
    int last_cum_auth_pins;
    int num_remote_waiters;  // number of remote authpin waiters
    fragment_info_t() : bits(0), all_frozen(false), last_cum_auth_pins(0), num_remote_waiters(0) {}
    bool is_fragmenting() { return !resultfrags.empty(); }
  };
  map<dirfrag_t,fragment_info_t> fragments;

  void adjust_dir_fragments(CInode *diri, frag_t basefrag, int bits,
                            list<CDir*>& frags, list<MDSInternalContextBase*>& waiters, bool replay);
  void adjust_dir_fragments(CInode *diri,
                            list<CDir*>& srcfrags,
                            frag_t basefrag, int bits,
                            list<CDir*>& resultfrags,
                            list<MDSInternalContextBase*>& waiters,
                            bool replay);
  CDir *force_dir_fragment(CInode *diri, frag_t fg, bool replay=true);
  void get_force_dirfrag_bound_set(vector<dirfrag_t>& dfs, set<CDir*>& bounds);

  bool can_fragment(CInode *diri, list<CDir*>& dirs);
  void fragment_freeze_dirs(list<CDir*>& dirs);
  void fragment_mark_and_complete(MDRequestRef& mdr);
  void fragment_frozen(MDRequestRef& mdr, int r);
  void fragment_unmark_unfreeze_dirs(list<CDir*>& dirs);
  void dispatch_fragment_dir(MDRequestRef& mdr);
  void _fragment_logged(MDRequestRef& mdr);
  void _fragment_stored(MDRequestRef& mdr);
  void _fragment_committed(dirfrag_t f, list<CDir*>& resultfrags);
  void _fragment_finish(dirfrag_t f, list<CDir*>& resultfrags);

  friend class EFragment;
  friend class C_MDC_FragmentFrozen;
  friend class C_MDC_FragmentMarking;
  friend class C_MDC_FragmentPrep;
  friend class C_MDC_FragmentStore;
  friend class C_MDC_FragmentCommit;
  friend class C_IO_MDC_FragmentFinish;

  void handle_fragment_notify(MMDSFragmentNotify *m);

  void add_uncommitted_fragment(dirfrag_t basedirfrag, int bits, list<frag_t>& old_frag,
                                LogSegment *ls, bufferlist *rollback=NULL);
  void finish_uncommitted_fragment(dirfrag_t basedirfrag, int op);
  void rollback_uncommitted_fragment(dirfrag_t basedirfrag, list<frag_t>& old_frags);
public:
  void wait_for_uncommitted_fragment(dirfrag_t dirfrag, MDSInternalContextBase *c) {
    assert(uncommitted_fragments.count(dirfrag));
    uncommitted_fragments[dirfrag].waiters.push_back(c);
  }
  void split_dir(CDir *dir, int byn);
  void merge_dir(CInode *diri, frag_t fg);
  void rollback_uncommitted_fragments();

  void find_stale_fragment_freeze();
  void fragment_freeze_inc_num_waiters(CDir *dir);
  bool fragment_are_all_frozen(CDir *dir);
  int get_num_fragmenting_dirs() { return fragments.size(); }
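
  // Illustrative sketch (hypothetical values): splitting a directory into
  // 2^3 = 8 new fragments, or merging fragments back together:
  //
  //   mdcache->split_dir(dir, 3);           // dir -> 8 child frags
  //   mdcache->merge_dir(diri, frag_t());   // merge everything under the root frag
  //
  // Both paths are driven through an internal request
  // (request_start_internal(CEPH_MDS_OP_FRAGMENTDIR)) so that freezing,
  // journaling, and rollback follow the normal MDRequest machinery.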

  // -- updates --
  //int send_inode_updates(CInode *in);
  //void handle_inode_update(MInodeUpdate *m);

  int send_dir_updates(CDir *in, bool bcast=false);
  void handle_dir_update(MDirUpdate *m);

  // -- cache expiration --
  void handle_cache_expire(MCacheExpire *m);
  void process_delayed_expire(CDir *dir);
  void discard_delayed_expire(CDir *dir);

protected:
  int dump_cache(const char *fn, Formatter *f,
                 const std::string& dump_root = "",
                 int depth = -1);
public:
  int dump_cache() { return dump_cache(NULL, NULL); }
  int dump_cache(const std::string &filename);
  int dump_cache(Formatter *f);
  int dump_cache(const std::string& dump_root, int depth, Formatter *f);

  void dump_resolve_status(Formatter *f) const;
  void dump_rejoin_status(Formatter *f) const;

  // == crap fns ==
public:
  void show_cache();
  void show_subtrees(int dbl=10);

  CInode *hack_pick_random_inode() {
    assert(!inode_map.empty());
    int n = rand() % inode_map.size();
    ceph::unordered_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
    while (n--) ++p;
    return p->second;
  }

protected:
  void flush_dentry_work(MDRequestRef& mdr);
  /**
   * Resolve path to a dentry and pass it onto the ScrubStack.
   *
   * TODO: return enough information to the original mdr formatter
   * and completion that they can subsequently check the progress of
   * this scrub (we won't block them on a whole scrub as it can take a very
   * long time)
   */
  void enqueue_scrub_work(MDRequestRef& mdr);
  void repair_inode_stats_work(MDRequestRef& mdr);
  void repair_dirfrag_stats_work(MDRequestRef& mdr);
  friend class C_MDC_RepairDirfragStats;
public:
  void flush_dentry(const string& path, Context *fin);
  /**
   * Create and start an OP_ENQUEUE_SCRUB
   */
  void enqueue_scrub(const string& path, const std::string &tag,
                     bool force, bool recursive, bool repair,
                     Formatter *f, Context *fin);
  void repair_inode_stats(CInode *diri);
  void repair_dirfrag_stats(CDir *dir);

public:
  /* Because exports may fail, this set lets us keep track of inodes that need exporting. */
  std::set<CInode *> export_pin_queue;
};

class C_MDS_RetryRequest : public MDSInternalContext {
  MDCache *cache;
  MDRequestRef mdr;
public:
  C_MDS_RetryRequest(MDCache *c, MDRequestRef& r);
  void finish(int r) override;
};
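
// Illustrative sketch: C_MDS_RetryRequest is the standard completion used to
// re-dispatch a request once whatever it was blocked on becomes ready, e.g.
// (hypothetical wait call)
//
//   dir->add_waiter(CDir::WAIT_UNFREEZE,
//                   new C_MDS_RetryRequest(mdcache, mdr));
//
// When the waiter fires, finish() re-enters MDCache::dispatch_request(mdr).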

#endif