// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */


#ifndef CEPH_MDCACHE_H
#define CEPH_MDCACHE_H

#include "include/types.h"
#include "include/filepath.h"
#include "include/elist.h"

#include "osdc/Filer.h"
#include "CInode.h"
#include "CDentry.h"
#include "CDir.h"
#include "include/Context.h"
#include "events/EMetaBlob.h"
#include "RecoveryQueue.h"
#include "StrayManager.h"
#include "MDSContext.h"
#include "MDSMap.h"
#include "Mutation.h"

#include "messages/MClientRequest.h"
#include "messages/MMDSSlaveRequest.h"

class PerfCounters;

class MDSRank;
class Session;
class Migrator;

class Message;

class MMDSResolve;
class MMDSResolveAck;
class MMDSCacheRejoin;
class MDiscover;
class MDiscoverReply;
class MCacheExpire;
class MDirUpdate;
class MDentryLink;
class MDentryUnlink;
class MLock;
struct MMDSFindIno;
struct MMDSFindInoReply;
struct MMDSOpenIno;
struct MMDSOpenInoReply;

class MClientRequest;
class MMDSSlaveRequest;
struct MClientSnap;

class MMDSFragmentNotify;

class ESubtreeMap;

enum {
  l_mdc_first = 3000,
  // How many inodes are currently held in stray dentries
  l_mdc_num_strays,
  // How many stray dentries are currently delayed for purge due to refs
  l_mdc_num_strays_delayed,
  // How many stray dentries are currently being enqueued for purge
  l_mdc_num_strays_enqueuing,

  // How many dentries have ever been added to the stray dir
  l_mdc_strays_created,
  // How many dentries have been passed on to the PurgeQueue
  l_mdc_strays_enqueued,
  // How many strays have been reintegrated
  l_mdc_strays_reintegrated,
  // How many strays have been migrated
  l_mdc_strays_migrated,

  // How many inodes are currently having their size recovered
  l_mdc_num_recovering_processing,
  // How many inodes are currently waiting to have their size recovered
  l_mdc_num_recovering_enqueued,
  // How many inodes are waiting with elevated priority for recovery
  l_mdc_num_recovering_prioritized,
  // How many inodes have ever started size recovery
  l_mdc_recovery_started,
  // How many inodes have ever completed size recovery
  l_mdc_recovery_completed,

  l_mdss_ireq_enqueue_scrub,
  l_mdss_ireq_exportdir,
  l_mdss_ireq_flush,
  l_mdss_ireq_fragmentdir,
  l_mdss_ireq_fragstats,
  l_mdss_ireq_inodestats,

  l_mdc_last,
};
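
// These counters are registered with the MDS perf counter subsystem by
// MDCache::register_perfcounters(), declared later in this header.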

// flags for predirty_journal_parents()
static const int PREDIRTY_PRIMARY = 1; // primary dn, adjust nested accounting
static const int PREDIRTY_DIR = 2;     // update parent dir mtime/size
static const int PREDIRTY_SHALLOW = 4; // only go to immediate parent (for easier rollback)
class MDCache {
 public:
  // my master
  MDSRank *mds;

  // -- my cache --
  LRU lru;        // dentry lru for expiring items from cache
  LRU bottom_lru; // dentries that should be trimmed ASAP
 protected:
  ceph::unordered_map<inodeno_t,CInode*> inode_map; // map of head inodes by ino
  map<vinodeno_t, CInode*> snap_inode_map;          // map of snap inodes by ino
  CInode *root;  // root inode
  CInode *myin;  // .ceph/mds%d dir

  bool readonly;
  void set_readonly() { readonly = true; }

  CInode *strays[NUM_STRAY]; // my stray dir
  int stray_index;

  CInode *get_stray() {
    return strays[stray_index];
  }

  set<CInode*> base_inodes;

  std::unique_ptr<PerfCounters> logger;

  Filer filer;

  bool exceeded_size_limit;
public:
  static uint64_t cache_limit_inodes(void) {
    return g_conf->get_val<int64_t>("mds_cache_size");
  }
  static uint64_t cache_limit_memory(void) {
    return g_conf->get_val<uint64_t>("mds_cache_memory_limit");
  }
  static double cache_reservation(void) {
    return g_conf->get_val<double>("mds_cache_reservation");
  }
  static double cache_mid(void) {
    return g_conf->get_val<double>("mds_cache_mid");
  }
  static double cache_health_threshold(void) {
    return g_conf->get_val<double>("mds_health_cache_threshold");
  }
  double cache_toofull_ratio(void) const {
    uint64_t inode_limit = cache_limit_inodes();
    double inode_reserve = inode_limit*(1.0-cache_reservation());
    double memory_reserve = cache_limit_memory()*(1.0-cache_reservation());
    return fmax(0.0, fmax((cache_size()-memory_reserve)/memory_reserve,
                          inode_limit == 0 ? 0.0 : (CInode::count()-inode_reserve)/inode_reserve));
  }
  bool cache_toofull(void) const {
    return cache_toofull_ratio() > 0.0;
  }
  uint64_t cache_size(void) const {
    return mempool::get_pool(mempool::mds_co::id).allocated_bytes();
  }
  bool cache_overfull(void) const {
    uint64_t inode_limit = cache_limit_inodes();
    return (inode_limit > 0 && CInode::count() > inode_limit*cache_health_threshold()) ||
           (cache_size() > cache_limit_memory()*cache_health_threshold());
  }
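
  // Worked example (config values are illustrative assumptions): with
  // mds_cache_memory_limit = 1 GiB, mds_cache_reservation = 0.05, and
  // mds_cache_size = 0 (no inode limit), memory_reserve = 0.95 GiB.
  // Once cache_size() reaches 1 GiB, cache_toofull_ratio() =
  // (1.0 - 0.95) / 0.95 ~= 0.053 > 0, so cache_toofull() is true; with
  // mds_health_cache_threshold = 1.5, cache_overfull() triggers only
  // after cache_size() exceeds 1.5 GiB.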

  void advance_stray() {
    stray_index = (stray_index+1)%NUM_STRAY;
  }

  void activate_stray_manager();

  /**
   * Call this when you know that a CDentry is ready to be passed
   * on to StrayManager (i.e. this is a stray you've just created)
   */
  void notify_stray(CDentry *dn) {
    assert(dn->get_dir()->get_inode()->is_stray());
    stray_manager.eval_stray(dn);
  }

  void maybe_eval_stray(CInode *in, bool delay=false);
  void clear_dirty_bits_for_stray(CInode* diri);

  bool is_readonly() { return readonly; }
  void force_readonly();

  DecayRate decayrate;

  int num_shadow_inodes;

  int num_inodes_with_caps;

  unsigned max_dir_commit_size;

  static file_layout_t gen_default_file_layout(const MDSMap &mdsmap);
  static file_layout_t gen_default_log_layout(const MDSMap &mdsmap);

  file_layout_t default_file_layout;
  file_layout_t default_log_layout;

  void register_perfcounters();

  // -- client leases --
public:
  static const int client_lease_pools = 3;
  float client_lease_durations[client_lease_pools];
protected:
  xlist<ClientLease*> client_leases[client_lease_pools];
public:
  void touch_client_lease(ClientLease *r, int pool, utime_t ttl) {
    client_leases[pool].push_back(&r->item_lease);
    r->ttl = ttl;
  }

  void notify_stray_removed()
  {
    stray_manager.notify_stray_removed();
  }

  void notify_stray_created()
  {
    stray_manager.notify_stray_created();
  }

  void eval_remote(CDentry *dn)
  {
    stray_manager.eval_remote(dn);
  }

  // -- client caps --
  uint64_t last_cap_id;


  // -- discover --
  struct discover_info_t {
    ceph_tid_t tid;
    mds_rank_t mds;
    inodeno_t ino;
    frag_t frag;
    snapid_t snap;
    filepath want_path;
    CInode *basei;
    bool want_base_dir;
    bool want_xlocked;

    discover_info_t() :
      tid(0), mds(-1), snap(CEPH_NOSNAP), basei(NULL),
      want_base_dir(false), want_xlocked(false) {}
    ~discover_info_t() {
      if (basei)
        basei->put(MDSCacheObject::PIN_DISCOVERBASE);
    }
    void pin_base(CInode *b) {
      basei = b;
      basei->get(MDSCacheObject::PIN_DISCOVERBASE);
    }
  };

  map<ceph_tid_t, discover_info_t> discovers;
  ceph_tid_t discover_last_tid;

  void _send_discover(discover_info_t& dis);
  discover_info_t& _create_discover(mds_rank_t mds) {
    ceph_tid_t t = ++discover_last_tid;
    discover_info_t& d = discovers[t];
    d.tid = t;
    d.mds = mds;
    return d;
  }
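
  // Illustrative call pattern only (mirroring the discover_* helpers
  // declared below; `base`, `approx_fg`, and `target_mds` are assumed
  // caller locals):
  //
  //   discover_info_t& d = _create_discover(target_mds);
  //   d.ino = base->ino();
  //   d.frag = approx_fg;
  //   d.pin_base(base);
  //   _send_discover(d);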

  // waiters
  map<int, map<inodeno_t, list<MDSInternalContextBase*> > > waiting_for_base_ino;

  void discover_base_ino(inodeno_t want_ino, MDSInternalContextBase *onfinish, mds_rank_t from=MDS_RANK_NONE);
  void discover_dir_frag(CInode *base, frag_t approx_fg, MDSInternalContextBase *onfinish,
                         mds_rank_t from=MDS_RANK_NONE);
  void discover_path(CInode *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
                     bool want_xlocked=false, mds_rank_t from=MDS_RANK_NONE);
  void discover_path(CDir *base, snapid_t snap, filepath want_path, MDSInternalContextBase *onfinish,
                     bool want_xlocked=false);
  void kick_discovers(mds_rank_t who); // after a failure.


  // -- subtrees --
protected:
  /* subtree keys and each tree's non-recursive nested subtrees (the "bounds") */
  map<CDir*,set<CDir*> > subtrees;
  map<CInode*,list<pair<CDir*,CDir*> > > projected_subtree_renames; // renamed ino -> target dir

  // adjust subtree auth specification
  //  dir->dir_auth
  //  imports/exports/nested_exports
  //  join/split subtrees as appropriate
public:
  bool is_subtrees() { return !subtrees.empty(); }
  void list_subtrees(list<CDir*>& ls);
  void adjust_subtree_auth(CDir *root, mds_authority_t auth);
  void adjust_subtree_auth(CDir *root, mds_rank_t a, mds_rank_t b=CDIR_AUTH_UNKNOWN) {
    adjust_subtree_auth(root, mds_authority_t(a,b));
  }
  void adjust_bounded_subtree_auth(CDir *dir, set<CDir*>& bounds, mds_authority_t auth);
  void adjust_bounded_subtree_auth(CDir *dir, set<CDir*>& bounds, mds_rank_t a) {
    adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
  }
  void adjust_bounded_subtree_auth(CDir *dir, vector<dirfrag_t>& bounds, mds_authority_t auth);
  void adjust_bounded_subtree_auth(CDir *dir, vector<dirfrag_t>& bounds, mds_rank_t a) {
    adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
  }
  void map_dirfrag_set(list<dirfrag_t>& dfs, set<CDir*>& result);
  void try_subtree_merge(CDir *root);
  void try_subtree_merge_at(CDir *root, set<CInode*> *to_eval);
  void subtree_merge_writebehind_finish(CInode *in, MutationRef& mut);
  void eval_subtree_root(CInode *diri);
  CDir *get_subtree_root(CDir *dir);
  CDir *get_projected_subtree_root(CDir *dir);
  bool is_leaf_subtree(CDir *dir) {
    assert(subtrees.count(dir));
    return subtrees[dir].empty();
  }
  void remove_subtree(CDir *dir);
  bool is_subtree(CDir *root) {
    return subtrees.count(root);
  }
  void get_subtree_bounds(CDir *root, set<CDir*>& bounds);
  void get_wouldbe_subtree_bounds(CDir *root, set<CDir*>& bounds);
  void verify_subtree_bounds(CDir *root, const set<CDir*>& bounds);
  void verify_subtree_bounds(CDir *root, const list<dirfrag_t>& bounds);

  void project_subtree_rename(CInode *diri, CDir *olddir, CDir *newdir);
  void adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop);

  void get_auth_subtrees(set<CDir*>& s);
  void get_fullauth_subtrees(set<CDir*>& s);

  int num_subtrees();
  int num_subtrees_fullauth();
  int num_subtrees_fullnonauth();

protected:
  // delayed cache expire
  map<CDir*, map<mds_rank_t, MCacheExpire*> > delayed_expire; // subtree root -> expire msg


  // -- requests --
  ceph::unordered_map<metareqid_t, MDRequestRef> active_requests;

public:
  int get_num_client_requests();

  MDRequestRef request_start(MClientRequest *req);
  MDRequestRef request_start_slave(metareqid_t rid, __u32 attempt, Message *m);
  MDRequestRef request_start_internal(int op);
  bool have_request(metareqid_t rid) {
    return active_requests.count(rid);
  }
  MDRequestRef request_get(metareqid_t rid);
  void request_pin_ref(MDRequestRef& r, CInode *ref, vector<CDentry*>& trace);
  void request_finish(MDRequestRef& mdr);
  void request_forward(MDRequestRef& mdr, mds_rank_t mds, int port=0);
  void dispatch_request(MDRequestRef& mdr);
  void request_drop_foreign_locks(MDRequestRef& mdr);
  void request_drop_non_rdlocks(MDRequestRef& r);
  void request_drop_locks(MDRequestRef& r);
  void request_cleanup(MDRequestRef& r);

  void request_kill(MDRequestRef& r);  // called when session closes

  // journal/snap helpers
  CInode *pick_inode_snap(CInode *in, snapid_t follows);
  CInode *cow_inode(CInode *in, snapid_t last);
  void journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, CDentry *dn,
                          snapid_t follows=CEPH_NOSNAP,
                          CInode **pcow_inode=0, CDentry::linkage_t *dnl=0);
  void journal_cow_inode(MutationRef& mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP,
                         CInode **pcow_inode=0);
  void journal_dirty_inode(MutationImpl *mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP);

  void project_rstat_inode_to_frag(CInode *cur, CDir *parent, snapid_t first,
                                   int linkunlink, SnapRealm *prealm);
  void _project_rstat_inode_to_frag(inode_t& inode, snapid_t ofirst, snapid_t last,
                                    CDir *parent, int linkunlink, bool update_inode);
  void project_rstat_frag_to_inode(nest_info_t& rstat, nest_info_t& accounted_rstat,
                                   snapid_t ofirst, snapid_t last,
                                   CInode *pin, bool cow_head);
  void broadcast_quota_to_client(CInode *in);
  void predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
                                CInode *in, CDir *parent,
                                int flags, int linkunlink=0,
                                snapid_t follows=CEPH_NOSNAP);

  // slaves
  void add_uncommitted_master(metareqid_t reqid, LogSegment *ls, set<mds_rank_t> &slaves, bool safe=false) {
    uncommitted_masters[reqid].ls = ls;
    uncommitted_masters[reqid].slaves = slaves;
    uncommitted_masters[reqid].safe = safe;
  }
  void wait_for_uncommitted_master(metareqid_t reqid, MDSInternalContextBase *c) {
    uncommitted_masters[reqid].waiters.push_back(c);
  }
  bool have_uncommitted_master(metareqid_t reqid, mds_rank_t from) {
    auto p = uncommitted_masters.find(reqid);
    return p != uncommitted_masters.end() && p->second.slaves.count(from) > 0;
  }
  void log_master_commit(metareqid_t reqid);
  void logged_master_update(metareqid_t reqid);
  void _logged_master_commit(metareqid_t reqid);
  void committed_master_slave(metareqid_t r, mds_rank_t from);
  void finish_committed_masters();

  void _logged_slave_commit(mds_rank_t from, metareqid_t reqid);

  // -- recovery --
protected:
  set<mds_rank_t> recovery_set;

public:
  void set_recovery_set(set<mds_rank_t>& s);
  void handle_mds_failure(mds_rank_t who);
  void handle_mds_recovery(mds_rank_t who);

protected:
  // [resolve]
  // from EImportStart w/o EImportFinish during journal replay
  map<dirfrag_t, vector<dirfrag_t> > my_ambiguous_imports;
  // from MMDSResolves
  map<mds_rank_t, map<dirfrag_t, vector<dirfrag_t> > > other_ambiguous_imports;

  map<mds_rank_t, map<metareqid_t, MDSlaveUpdate*> > uncommitted_slave_updates;  // slave: for replay.
  map<CInode*, int> uncommitted_slave_rename_olddir;  // slave: preserve the non-auth dir until seeing commit.
  map<CInode*, int> uncommitted_slave_unlink;  // slave: preserve the unlinked inode until seeing commit.

  // track master requests whose slaves haven't acknowledged commit
  struct umaster {
    set<mds_rank_t> slaves;
    LogSegment *ls;
    list<MDSInternalContextBase*> waiters;
    bool safe;
    bool committing;
    bool recovering;
    umaster() : ls(NULL), safe(false), committing(false), recovering(false) {}
  };
  map<metareqid_t, umaster> uncommitted_masters;  // master: req -> slave set

  set<metareqid_t> pending_masters;
  map<int, set<metareqid_t> > ambiguous_slave_updates;

  friend class ESlaveUpdate;
  friend class ECommitted;

  bool resolves_pending;
  set<mds_rank_t> resolve_gather;      // nodes i need resolves from
  set<mds_rank_t> resolve_ack_gather;  // nodes i need a resolve_ack from
  map<metareqid_t, mds_rank_t> need_resolve_rollback;  // rollbacks i'm writing to the journal
  map<mds_rank_t, MMDSResolve*> delayed_resolve;

  void handle_resolve(MMDSResolve *m);
  void handle_resolve_ack(MMDSResolveAck *m);
  void process_delayed_resolve();
  void discard_delayed_resolve(mds_rank_t who);
  void maybe_resolve_finish();
  void disambiguate_my_imports();
  void disambiguate_other_imports();
  void trim_unlinked_inodes();
  void add_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master, MDSlaveUpdate*);
  void finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
  MDSlaveUpdate* get_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master);
public:
  void recalc_auth_bits(bool replay);
  void remove_inode_recursive(CInode *in);

  bool is_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    auto p = ambiguous_slave_updates.find(master);
    return p != ambiguous_slave_updates.end() && p->second.count(reqid);
  }
  void add_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    ambiguous_slave_updates[master].insert(reqid);
  }
  void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
    auto p = ambiguous_slave_updates.find(master);
    assert(p != ambiguous_slave_updates.end());
    auto q = p->second.find(reqid);
    assert(q != p->second.end());
    p->second.erase(q);
    if (p->second.empty())
      ambiguous_slave_updates.erase(p);
  }

  void add_rollback(metareqid_t reqid, mds_rank_t master) {
    need_resolve_rollback[reqid] = master;
  }
  void finish_rollback(metareqid_t reqid);

  // ambiguous imports
  void add_ambiguous_import(dirfrag_t base, const vector<dirfrag_t>& bounds);
  void add_ambiguous_import(CDir *base, const set<CDir*>& bounds);
  bool have_ambiguous_import(dirfrag_t base) {
    return my_ambiguous_imports.count(base);
  }
  void get_ambiguous_import_bounds(dirfrag_t base, vector<dirfrag_t>& bounds) {
    assert(my_ambiguous_imports.count(base));
    bounds = my_ambiguous_imports[base];
  }
  void cancel_ambiguous_import(CDir *);
  void finish_ambiguous_import(dirfrag_t dirino);
  void resolve_start(MDSInternalContext *resolve_done_);
  void send_resolves();
  void send_slave_resolves();
  void send_subtree_resolves();
  void maybe_send_pending_resolves() {
    if (resolves_pending)
      send_subtree_resolves();
  }

  void _move_subtree_map_bound(dirfrag_t df, dirfrag_t oldparent, dirfrag_t newparent,
                               map<dirfrag_t,vector<dirfrag_t> >& subtrees);
  ESubtreeMap *create_subtree_map();


  void clean_open_file_lists();

protected:
  // [rejoin]
  bool rejoins_pending;
  set<mds_rank_t> rejoin_gather;     // nodes from whom i need a rejoin
  set<mds_rank_t> rejoin_sent;       // nodes i sent a rejoin to
  set<mds_rank_t> rejoin_ack_sent;   // nodes i sent a rejoin ack to
  set<mds_rank_t> rejoin_ack_gather; // nodes from whom i need a rejoin ack
  map<mds_rank_t,map<inodeno_t,map<client_t,Capability::Import> > > rejoin_imported_caps;
  map<inodeno_t,pair<mds_rank_t,map<client_t,Capability::Export> > > rejoin_slave_exports;
  map<client_t,entity_inst_t> rejoin_client_map;

  map<inodeno_t,map<client_t,cap_reconnect_t> > cap_exports; // ino -> client -> capex
  map<inodeno_t,mds_rank_t> cap_export_targets; // ino -> auth mds

  map<inodeno_t,map<client_t,map<mds_rank_t,cap_reconnect_t> > > cap_imports;  // ino -> client -> frommds -> capex
  set<inodeno_t> cap_imports_missing;
  map<inodeno_t, list<MDSInternalContextBase*> > cap_reconnect_waiters;
  int cap_imports_num_opening;

  set<CInode*> rejoin_undef_inodes;
  set<CInode*> rejoin_potential_updated_scatterlocks;
  set<CDir*> rejoin_undef_dirfrags;
  map<mds_rank_t, set<CInode*> > rejoin_unlinked_inodes;

  vector<CInode*> rejoin_recover_q, rejoin_check_q;
  list<SimpleLock*> rejoin_eval_locks;
  list<MDSInternalContextBase*> rejoin_waiters;

  void rejoin_walk(CDir *dir, MMDSCacheRejoin *rejoin);
  void handle_cache_rejoin(MMDSCacheRejoin *m);
  void handle_cache_rejoin_weak(MMDSCacheRejoin *m);
  CInode* rejoin_invent_inode(inodeno_t ino, snapid_t last);
  CDir* rejoin_invent_dirfrag(dirfrag_t df);
  void handle_cache_rejoin_strong(MMDSCacheRejoin *m);
  void rejoin_scour_survivor_replicas(mds_rank_t from, MMDSCacheRejoin *ack,
                                      set<vinodeno_t>& acked_inodes,
                                      set<SimpleLock *>& gather_locks);
  void handle_cache_rejoin_ack(MMDSCacheRejoin *m);
  void rejoin_send_acks();
  void rejoin_trim_undef_inodes();
  void maybe_send_pending_rejoins() {
    if (rejoins_pending)
      rejoin_send_rejoins();
  }
  std::unique_ptr<MDSInternalContext> rejoin_done;
  std::unique_ptr<MDSInternalContext> resolve_done;
public:
  void rejoin_start(MDSInternalContext *rejoin_done_);
  void rejoin_gather_finish();
  void rejoin_send_rejoins();
  void rejoin_export_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
                          int target=-1) {
    cap_exports[ino][client] = icr;
    cap_export_targets[ino] = target;
  }
  void rejoin_recovered_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
                             mds_rank_t frommds=MDS_RANK_NONE) {
    cap_imports[ino][client][frommds] = icr;
  }
  const cap_reconnect_t *get_replay_cap_reconnect(inodeno_t ino, client_t client) {
    if (cap_imports.count(ino) &&
        cap_imports[ino].count(client) &&
        cap_imports[ino][client].count(MDS_RANK_NONE)) {
      return &cap_imports[ino][client][MDS_RANK_NONE];
    }
    return NULL;
  }
  void remove_replay_cap_reconnect(inodeno_t ino, client_t client) {
    assert(cap_imports[ino].size() == 1);
    assert(cap_imports[ino][client].size() == 1);
    cap_imports.erase(ino);
  }
  void wait_replay_cap_reconnect(inodeno_t ino, MDSInternalContextBase *c) {
    cap_reconnect_waiters[ino].push_back(c);
  }

  // [reconnect/rejoin caps]
  struct reconnected_cap_info_t {
    inodeno_t realm_ino;
    snapid_t snap_follows;
    int dirty_caps;
    reconnected_cap_info_t() :
      realm_ino(0), snap_follows(0), dirty_caps(0) {}
  };
  map<inodeno_t,map<client_t, reconnected_cap_info_t> > reconnected_caps;  // inode -> client -> snap_follows,realmino
  map<inodeno_t,map<client_t, snapid_t> > reconnected_snaprealms;          // realmino -> client -> realmseq

  void add_reconnected_cap(client_t client, inodeno_t ino, const cap_reconnect_t& icr) {
    reconnected_cap_info_t &info = reconnected_caps[ino][client];
    info.realm_ino = inodeno_t(icr.capinfo.snaprealm);
    info.snap_follows = icr.snap_follows;
  }
  void set_reconnected_dirty_caps(client_t client, inodeno_t ino, int dirty) {
    reconnected_cap_info_t &info = reconnected_caps[ino][client];
    info.dirty_caps |= dirty;
  }
  void add_reconnected_snaprealm(client_t client, inodeno_t ino, snapid_t seq) {
    reconnected_snaprealms[ino][client] = seq;
  }

  friend class C_MDC_RejoinOpenInoFinish;
  friend class C_MDC_RejoinSessionsOpened;
  void rejoin_open_ino_finish(inodeno_t ino, int ret);
  void rejoin_open_sessions_finish(map<client_t,entity_inst_t> client_map,
                                   map<client_t,uint64_t>& sseqmap);
  bool process_imported_caps();
  void choose_lock_states_and_reconnect_caps();
  void prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t ino,
                           map<client_t,MClientSnap*>& splits);
  void do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool nosend=false);
  void send_snaps(map<client_t,MClientSnap*>& splits);
  Capability* rejoin_import_cap(CInode *in, client_t client, const cap_reconnect_t& icr, mds_rank_t frommds);
  void finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snapid_t seq);
  void try_reconnect_cap(CInode *in, Session *session);
  void export_remaining_imported_caps();

  // cap imports.  delayed snap parent opens.
  //  realm inode -> client -> cap inodes needing to split to this realm
  map<CInode*,set<CInode*> > missing_snap_parents;
  map<client_t,set<CInode*> > delayed_imported_caps;

  void do_cap_import(Session *session, CInode *in, Capability *cap,
                     uint64_t p_cap_id, ceph_seq_t p_seq, ceph_seq_t p_mseq,
                     int peer, int p_flags);
  void do_delayed_cap_imports();
  void rebuild_need_snapflush(CInode *head_in, SnapRealm *realm, client_t client,
                              snapid_t snap_follows);
  void check_realm_past_parents(SnapRealm *realm, bool reconnect);
  void open_snap_parents();

  bool open_undef_inodes_dirfrags();
  void opened_undef_inode(CInode *in);
  void opened_undef_dirfrag(CDir *dir) {
    rejoin_undef_dirfrags.erase(dir);
  }

  void reissue_all_caps();


  friend class Locker;
  friend class Migrator;
  friend class MDBalancer;

  // StrayManager needs to be able to remove_inode() from us
  // when it is done purging
  friend class StrayManager;

  // File size recovery
private:
  RecoveryQueue recovery_queue;
  void identify_files_to_recover();
public:
  void start_files_to_recover();
  void do_file_recover();
  void queue_file_recover(CInode *in);
  void _queued_file_recover_cow(CInode *in, MutationRef& mut);

  // subsystems
  std::unique_ptr<Migrator> migrator;

 public:
  explicit MDCache(MDSRank *m, PurgeQueue &purge_queue_);
  ~MDCache();

  // debug
  void log_stat();

  // root inode
  CInode *get_root() { return root; }
  CInode *get_myin() { return myin; }

  size_t get_cache_size() { return lru.lru_get_size(); }

  // trimming
  bool trim(uint64_t count=0);
private:
  void trim_lru(uint64_t count, map<mds_rank_t, MCacheExpire*>& expiremap);
  bool trim_dentry(CDentry *dn, map<mds_rank_t, MCacheExpire*>& expiremap);
  void trim_dirfrag(CDir *dir, CDir *con,
                    map<mds_rank_t, MCacheExpire*>& expiremap);
  bool trim_inode(CDentry *dn, CInode *in, CDir *con,
                  map<mds_rank_t,class MCacheExpire*>& expiremap);
  void send_expire_messages(map<mds_rank_t, MCacheExpire*>& expiremap);
  void trim_non_auth();  // trim out trimmable non-auth items
public:
  bool trim_non_auth_subtree(CDir *directory);
  void standby_trim_segment(LogSegment *ls);
  void try_trim_non_auth_subtree(CDir *dir);
  bool can_trim_non_auth_dirfrag(CDir *dir) {
    return my_ambiguous_imports.count(dir->dirfrag()) == 0 &&
           uncommitted_slave_rename_olddir.count(dir->inode) == 0;
  }

  /**
   * For all unreferenced inodes, dirs, dentries below an inode, compose
   * expiry messages.  This is used when giving up all replicas of entities
   * for an MDS peer in the 'stopping' state, such that the peer can
   * empty its cache and finish shutting down.
   *
   * We have to make sure we're only expiring un-referenced items to
   * avoid interfering with ongoing stray-movement (we can't distinguish
   * between the "moving my strays" and "waiting for my cache to empty"
   * phases within 'stopping').
   *
   * @return false if we completed cleanly, true if caller should stop
   * expiring because we hit something with refs.
   */
  bool expire_recursive(
    CInode *in,
    std::map<mds_rank_t, MCacheExpire*>& expiremap);

  void trim_client_leases();
  void check_memory_usage();

  utime_t last_recall_state;

  // shutdown
private:
  set<inodeno_t> shutdown_exported_strays;
public:
  void shutdown_start();
  void shutdown_check();
  bool shutdown_pass();
  bool shutdown_export_strays();
  bool shutdown();  // clear cache (i.e. at shutdown)

  bool did_shutdown_log_cap;

  // inode_map
  bool have_inode(vinodeno_t vino) {
    if (vino.snapid == CEPH_NOSNAP)
      return inode_map.count(vino.ino) ? true : false;
    else
      return snap_inode_map.count(vino) ? true : false;
  }
  bool have_inode(inodeno_t ino, snapid_t snap=CEPH_NOSNAP) {
    return have_inode(vinodeno_t(ino, snap));
  }
  CInode* get_inode(vinodeno_t vino) {
    if (vino.snapid == CEPH_NOSNAP) {
      auto p = inode_map.find(vino.ino);
      if (p != inode_map.end())
        return p->second;
    } else {
      auto p = snap_inode_map.find(vino);
      if (p != snap_inode_map.end())
        return p->second;
    }
    return NULL;
  }
  CInode* get_inode(inodeno_t ino, snapid_t s=CEPH_NOSNAP) {
    return get_inode(vinodeno_t(ino, s));
  }

  CDir* get_dirfrag(dirfrag_t df) {
    CInode *in = get_inode(df.ino);
    if (!in)
      return NULL;
    return in->get_dirfrag(df.frag);
  }
  CDir* get_dirfrag(inodeno_t ino, const string& dn) {
    CInode *in = get_inode(ino);
    if (!in)
      return NULL;
    frag_t fg = in->pick_dirfrag(dn);
    return in->get_dirfrag(fg);
  }
  CDir* get_force_dirfrag(dirfrag_t df, bool replay) {
    CInode *diri = get_inode(df.ino);
    if (!diri)
      return NULL;
    CDir *dir = force_dir_fragment(diri, df.frag, replay);
    if (!dir)
      dir = diri->get_dirfrag(df.frag);
    return dir;
  }

  MDSCacheObject *get_object(MDSCacheObjectInfo &info);


 public:
  void add_inode(CInode *in);

  void remove_inode(CInode *in);
 protected:
  void touch_inode(CInode *in) {
    if (in->get_parent_dn())
      touch_dentry(in->get_projected_parent_dn());
  }
public:
  void touch_dentry(CDentry *dn) {
    if (dn->state_test(CDentry::STATE_BOTTOMLRU)) {
      bottom_lru.lru_midtouch(dn);
    } else {
      if (dn->is_auth())
        lru.lru_touch(dn);
      else
        lru.lru_midtouch(dn);
    }
  }
  void touch_dentry_bottom(CDentry *dn) {
    if (dn->state_test(CDentry::STATE_BOTTOMLRU))
      return;
    lru.lru_bottouch(dn);
  }
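
  // A note on the two LRUs (descriptive summary of the logic in
  // touch_dentry()/touch_dentry_bottom() above): dentries flagged
  // CDentry::STATE_BOTTOMLRU live in bottom_lru, which holds dentries
  // that should be trimmed ASAP; everything else ages through the main
  // lru, where auth dentries are touched to the hot end and replicas
  // only to the midpoint, so replicas tend to be expired sooner.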
protected:

  void inode_remove_replica(CInode *in, mds_rank_t rep, bool rejoin,
                            set<SimpleLock *>& gather_locks);
  void dentry_remove_replica(CDentry *dn, mds_rank_t rep, set<SimpleLock *>& gather_locks);

  void rename_file(CDentry *srcdn, CDentry *destdn);

 public:
  // truncate
  void truncate_inode(CInode *in, LogSegment *ls);
  void _truncate_inode(CInode *in, LogSegment *ls);
  void truncate_inode_finish(CInode *in, LogSegment *ls);
  void truncate_inode_logged(CInode *in, MutationRef& mut);

  void add_recovered_truncate(CInode *in, LogSegment *ls);
  void remove_recovered_truncate(CInode *in, LogSegment *ls);
  void start_recovered_truncates();


 public:
  CDir *get_auth_container(CDir *in);
  CDir *get_export_container(CDir *dir);
  void find_nested_exports(CDir *dir, set<CDir*>& s);
  void find_nested_exports_under(CDir *import, CDir *dir, set<CDir*>& s);


private:
  bool opening_root, open;
  list<MDSInternalContextBase*> waiting_for_open;

public:
  void init_layouts();
  void create_unlinked_system_inode(CInode *in, inodeno_t ino,
                                    int mode) const;
  CInode *create_system_inode(inodeno_t ino, int mode);
  CInode *create_root_inode();

  void create_empty_hierarchy(MDSGather *gather);
  void create_mydir_hierarchy(MDSGather *gather);

  bool is_open() { return open; }
  void wait_for_open(MDSInternalContextBase *c) {
    waiting_for_open.push_back(c);
  }

  void open_root_inode(MDSInternalContextBase *c);
  void open_root();
  void open_mydir_inode(MDSInternalContextBase *c);
  void populate_mydir();

  void _create_system_file(CDir *dir, const char *name, CInode *in, MDSInternalContextBase *fin);
  void _create_system_file_finish(MutationRef& mut, CDentry *dn,
                                  version_t dpv, MDSInternalContextBase *fin);

  void open_foreign_mdsdir(inodeno_t ino, MDSInternalContextBase *c);
  CDir *get_stray_dir(CInode *in);
  CDentry *get_or_create_stray_dentry(CInode *in);

  MDSInternalContextBase *_get_waiter(MDRequestRef& mdr, Message *req, MDSInternalContextBase *fin);

  /**
   * Find the given dentry (and whether it exists or not), its ancestors,
   * and get them all into memory and usable on this MDS.  This function
   * makes a best-effort attempt to load everything; if it needs to
   * go away and do something then it will put the request on a waitlist.
   * It prefers the mdr, then the req, then the fin; at least one of
   * these three must be non-null.
   *
   * @param mdr The MDRequest associated with the path.  Can be null.
   * @param req The Message associated with the path.  Can be null.
   * @param fin The Context associated with the path.  Can be null.
   * @param path The path to traverse to.
   * @param pdnvec Data return parameter -- on success, contains a
   * vector of dentries.  On failure, is either empty or contains the
   * full trace of traversable dentries.
   * @param pin Data return parameter -- if successful, points to the inode
   * associated with filepath.  If unsuccessful, is null.
   * @param onfail Specifies different lookup failure behaviors.  If set to
   * MDS_TRAVERSE_DISCOVERXLOCK, path_traverse will succeed on null
   * dentries (instead of returning -ENOENT).  If set to
   * MDS_TRAVERSE_FORWARD, it will forward the request to the auth
   * MDS if that becomes appropriate (i.e. if it doesn't know the contents
   * of a directory).  If set to MDS_TRAVERSE_DISCOVER, it
   * will attempt to look up the path from a different MDS (and bring the
   * discovered objects into its cache as replicas).
   *
   * @returns 0 on success, 1 on "not done yet", 2 on "forwarding", -errno otherwise.
   * If it returns 1, the requester associated with this call has been placed
   * on the appropriate waitlist, and it should unwind itself and back out.
   * If it returns 2 the request has been forwarded, and again the requester
   * should unwind itself and back out.
   */
  int path_traverse(MDRequestRef& mdr, Message *req, MDSInternalContextBase *fin, const filepath& path,
                    vector<CDentry*> *pdnvec, CInode **pin, int onfail);
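
  // Illustrative caller sketch only (assumed local names): call sites
  // typically retry via C_MDS_RetryRequest (defined at the bottom of
  // this header) once the waitlist fires:
  //
  //   vector<CDentry*> trace;
  //   CInode *in = NULL;
  //   int r = path_traverse(mdr, NULL, NULL, path, &trace, &in,
  //                         MDS_TRAVERSE_FORWARD);
  //   if (r > 0)
  //     return;     // 1 = waiting, 2 = forwarded; unwind and back out
  //   if (r < 0)
  //     ...         // -errno, e.g. -ENOENT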

  CInode *cache_traverse(const filepath& path);

  void open_remote_dirfrag(CInode *diri, frag_t fg, MDSInternalContextBase *fin);
  CInode *get_dentry_inode(CDentry *dn, MDRequestRef& mdr, bool projected=false);

  bool parallel_fetch(map<inodeno_t,filepath>& pathmap, set<inodeno_t>& missing);
  bool parallel_fetch_traverse_dir(inodeno_t ino, filepath& path,
                                   set<CDir*>& fetch_queue, set<inodeno_t>& missing,
                                   C_GatherBuilder &gather_bld);

  void open_remote_dentry(CDentry *dn, bool projected, MDSInternalContextBase *fin,
                          bool want_xlocked=false);
  void _open_remote_dentry_finish(CDentry *dn, inodeno_t ino, MDSInternalContextBase *fin,
                                  bool want_xlocked, int r);

  void make_trace(vector<CDentry*>& trace, CInode *in);

protected:
  struct open_ino_info_t {
    vector<inode_backpointer_t> ancestors;
    set<mds_rank_t> checked;
    mds_rank_t checking;
    mds_rank_t auth_hint;
    bool check_peers;
    bool fetch_backtrace;
    bool discover;
    bool want_replica;
    bool want_xlocked;
    version_t tid;
    int64_t pool;
    int last_err;
    list<MDSInternalContextBase*> waiters;
    open_ino_info_t() : checking(MDS_RANK_NONE), auth_hint(MDS_RANK_NONE),
                        check_peers(true), fetch_backtrace(true), discover(false),
                        want_replica(false), want_xlocked(false), tid(0), pool(-1),
                        last_err(0) {}
  };
  ceph_tid_t open_ino_last_tid;
  map<inodeno_t,open_ino_info_t> opening_inodes;

  void _open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err);
  void _open_ino_parent_opened(inodeno_t ino, int ret);
  void _open_ino_traverse_dir(inodeno_t ino, open_ino_info_t& info, int err);
  void _open_ino_fetch_dir(inodeno_t ino, MMDSOpenIno *m, CDir *dir, bool parent);
  int open_ino_traverse_dir(inodeno_t ino, MMDSOpenIno *m,
                            vector<inode_backpointer_t>& ancestors,
                            bool discover, bool want_xlocked, mds_rank_t *hint);
  void open_ino_finish(inodeno_t ino, open_ino_info_t& info, int err);
  void do_open_ino(inodeno_t ino, open_ino_info_t& info, int err);
  void do_open_ino_peer(inodeno_t ino, open_ino_info_t& info);
  void handle_open_ino(MMDSOpenIno *m, int err=0);
  void handle_open_ino_reply(MMDSOpenInoReply *m);
  friend class C_IO_MDC_OpenInoBacktraceFetched;
  friend struct C_MDC_OpenInoTraverseDir;
  friend struct C_MDC_OpenInoParentOpened;

public:
  void kick_open_ino_peers(mds_rank_t who);
  void open_ino(inodeno_t ino, int64_t pool, MDSInternalContextBase *fin,
                bool want_replica=true, bool want_xlocked=false);

  // -- find_ino_peer --
  struct find_ino_peer_info_t {
    inodeno_t ino;
    ceph_tid_t tid;
    MDSInternalContextBase *fin;
    mds_rank_t hint;
    mds_rank_t checking;
    set<mds_rank_t> checked;

    find_ino_peer_info_t() : tid(0), fin(NULL), hint(MDS_RANK_NONE), checking(MDS_RANK_NONE) {}
  };

  map<ceph_tid_t, find_ino_peer_info_t> find_ino_peer;
  ceph_tid_t find_ino_peer_last_tid;

  void find_ino_peers(inodeno_t ino, MDSInternalContextBase *c, mds_rank_t hint=MDS_RANK_NONE);
  void _do_find_ino_peer(find_ino_peer_info_t& fip);
  void handle_find_ino(MMDSFindIno *m);
  void handle_find_ino_reply(MMDSFindInoReply *m);
  void kick_find_ino_peers(mds_rank_t who);

  // -- snaprealms --
public:
  void snaprealm_create(MDRequestRef& mdr, CInode *in);
  void _snaprealm_create_finish(MDRequestRef& mdr, MutationRef& mut, CInode *in);

  // -- stray --
public:
  void fetch_backtrace(inodeno_t ino, int64_t pool, bufferlist& bl, Context *fin);
  uint64_t get_num_strays() const { return stray_manager.get_num_strays(); }

protected:
  void scan_stray_dir(dirfrag_t next=dirfrag_t());
  StrayManager stray_manager;
  friend struct C_MDC_RetryScanStray;
  friend class C_IO_MDC_FetchedBacktrace;

  // == messages ==
 public:
  void dispatch(Message *m);

 protected:
  // -- replicas --
  void handle_discover(MDiscover *dis);
  void handle_discover_reply(MDiscoverReply *m);
  friend class C_MDC_Join;

public:
  void replicate_dir(CDir *dir, mds_rank_t to, bufferlist& bl);
  void replicate_dentry(CDentry *dn, mds_rank_t to, bufferlist& bl);
  void replicate_inode(CInode *in, mds_rank_t to, bufferlist& bl,
                       uint64_t features);

  CDir* add_replica_dir(bufferlist::iterator& p, CInode *diri, mds_rank_t from, list<MDSInternalContextBase*>& finished);
  CDentry *add_replica_dentry(bufferlist::iterator& p, CDir *dir, list<MDSInternalContextBase*>& finished);
  CInode *add_replica_inode(bufferlist::iterator& p, CDentry *dn, list<MDSInternalContextBase*>& finished);

  void replicate_stray(CDentry *straydn, mds_rank_t who, bufferlist& bl);
  CDentry *add_replica_stray(bufferlist &bl, mds_rank_t from);

  // -- namespace --
public:
  void send_dentry_link(CDentry *dn, MDRequestRef& mdr);
  void send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& mdr);
protected:
  void handle_dentry_link(MDentryLink *m);
  void handle_dentry_unlink(MDentryUnlink *m);


  // -- fragmenting --
private:
  struct ufragment {
    int bits;
    bool committed;
    LogSegment *ls;
    list<MDSInternalContextBase*> waiters;
    list<frag_t> old_frags;
    bufferlist rollback;
    ufragment() : bits(0), committed(false), ls(NULL) {}
  };
  map<dirfrag_t, ufragment> uncommitted_fragments;

  struct fragment_info_t {
    int bits;
    list<CDir*> dirs;
    list<CDir*> resultfrags;
    MDRequestRef mdr;
    // for deadlock detection
    bool all_frozen;
    utime_t last_cum_auth_pins_change;
    int last_cum_auth_pins;
    int num_remote_waiters;  // number of remote authpin waiters
    fragment_info_t() : bits(0), all_frozen(false), last_cum_auth_pins(0), num_remote_waiters(0) {}
    bool is_fragmenting() { return !resultfrags.empty(); }
  };
  map<dirfrag_t,fragment_info_t> fragments;

  void adjust_dir_fragments(CInode *diri, frag_t basefrag, int bits,
                            list<CDir*>& frags, list<MDSInternalContextBase*>& waiters, bool replay);
  void adjust_dir_fragments(CInode *diri,
                            list<CDir*>& srcfrags,
                            frag_t basefrag, int bits,
                            list<CDir*>& resultfrags,
                            list<MDSInternalContextBase*>& waiters,
                            bool replay);
  CDir *force_dir_fragment(CInode *diri, frag_t fg, bool replay=true);
  void get_force_dirfrag_bound_set(vector<dirfrag_t>& dfs, set<CDir*>& bounds);

  bool can_fragment(CInode *diri, list<CDir*>& dirs);
  void fragment_freeze_dirs(list<CDir*>& dirs);
  void fragment_mark_and_complete(MDRequestRef& mdr);
  void fragment_frozen(MDRequestRef& mdr, int r);
  void fragment_unmark_unfreeze_dirs(list<CDir*>& dirs);
  void dispatch_fragment_dir(MDRequestRef& mdr);
  void _fragment_logged(MDRequestRef& mdr);
  void _fragment_stored(MDRequestRef& mdr);
  void _fragment_committed(dirfrag_t f, list<CDir*>& resultfrags);
  void _fragment_finish(dirfrag_t f, list<CDir*>& resultfrags);

  friend class EFragment;
  friend class C_MDC_FragmentFrozen;
  friend class C_MDC_FragmentMarking;
  friend class C_MDC_FragmentPrep;
  friend class C_MDC_FragmentStore;
  friend class C_MDC_FragmentCommit;
  friend class C_IO_MDC_FragmentFinish;

  void handle_fragment_notify(MMDSFragmentNotify *m);

  void add_uncommitted_fragment(dirfrag_t basedirfrag, int bits, list<frag_t>& old_frag,
                                LogSegment *ls, bufferlist *rollback=NULL);
  void finish_uncommitted_fragment(dirfrag_t basedirfrag, int op);
  void rollback_uncommitted_fragment(dirfrag_t basedirfrag, list<frag_t>& old_frags);
public:
  void wait_for_uncommitted_fragment(dirfrag_t dirfrag, MDSInternalContextBase *c) {
    assert(uncommitted_fragments.count(dirfrag));
    uncommitted_fragments[dirfrag].waiters.push_back(c);
  }
  void split_dir(CDir *dir, int byn);
  void merge_dir(CInode *diri, frag_t fg);
  void rollback_uncommitted_fragments();

  void find_stale_fragment_freeze();
  void fragment_freeze_inc_num_waiters(CDir *dir);
  bool fragment_are_all_frozen(CDir *dir);
  int get_num_fragmenting_dirs() { return fragments.size(); }
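
  // Illustrative only (assumed caller context): a caller such as the
  // balancer might split a hot directory into 2^3 = 8 fragments, or
  // merge the fragments under a given frag back together:
  //
  //   split_dir(dir, 3);    // fragment `dir` by 3 bits
  //   merge_dir(diri, fg);  // merge the fragments under `fg` of inode `diri`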

  // -- updates --
  //int send_inode_updates(CInode *in);
  //void handle_inode_update(MInodeUpdate *m);

  int send_dir_updates(CDir *in, bool bcast=false);
  void handle_dir_update(MDirUpdate *m);

  // -- cache expiration --
  void handle_cache_expire(MCacheExpire *m);
  void process_delayed_expire(CDir *dir);
  void discard_delayed_expire(CDir *dir);

protected:
  int dump_cache(const char *fn, Formatter *f,
                 const std::string& dump_root = "",
                 int depth = -1);
public:
  int dump_cache() { return dump_cache(NULL, NULL); }
  int dump_cache(const std::string &filename);
  int dump_cache(Formatter *f);
  int dump_cache(const std::string& dump_root, int depth, Formatter *f);

  int cache_status(Formatter *f);

  void dump_resolve_status(Formatter *f) const;
  void dump_rejoin_status(Formatter *f) const;

  // == crap fns ==
 public:
  void show_cache();
  void show_subtrees(int dbl=10);

  CInode *hack_pick_random_inode() {
    assert(!inode_map.empty());
    int n = rand() % inode_map.size();
    auto p = inode_map.begin();
    while (n--) ++p;
    return p->second;
  }

protected:
  void flush_dentry_work(MDRequestRef& mdr);
  /**
   * Resolve path to a dentry and pass it onto the ScrubStack.
   *
   * TODO: return enough information to the original mdr formatter
   * and completion that they can subsequently check the progress of
   * this scrub (we won't block them on a whole scrub as it can take a very
   * long time)
   */
  void enqueue_scrub_work(MDRequestRef& mdr);
  void repair_inode_stats_work(MDRequestRef& mdr);
  void repair_dirfrag_stats_work(MDRequestRef& mdr);
  friend class C_MDC_RepairDirfragStats;
public:
  void flush_dentry(const string& path, Context *fin);
  /**
   * Create and start an OP_ENQUEUE_SCRUB
   */
  void enqueue_scrub(const string& path, const std::string &tag,
                     bool force, bool recursive, bool repair,
                     Formatter *f, Context *fin);
  void repair_inode_stats(CInode *diri);
  void repair_dirfrag_stats(CDir *dir);

public:
  /* Because exports may fail, this set lets us keep track of inodes that need exporting. */
  std::set<CInode *> export_pin_queue;
};

class C_MDS_RetryRequest : public MDSInternalContext {
  MDCache *cache;
  MDRequestRef mdr;
 public:
  C_MDS_RetryRequest(MDCache *c, MDRequestRef& r);
  void finish(int r) override;
};

#endif