]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | /* | |
4 | * Ceph - scalable distributed file system | |
5 | * | |
6 | * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> | |
7 | * | |
8 | * This is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License version 2.1, as published by the Free Software | |
11 | * Foundation. See file COPYING. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include "include/int_types.h" | |
16 | #include "common/errno.h" | |
17 | ||
18 | #include <string> | |
19 | #include <stdio.h> | |
20 | ||
21 | #include "CInode.h" | |
22 | #include "CDir.h" | |
23 | #include "CDentry.h" | |
24 | ||
25 | #include "MDSRank.h" | |
26 | #include "MDCache.h" | |
27 | #include "MDLog.h" | |
28 | #include "Locker.h" | |
29 | #include "Mutation.h" | |
30 | ||
31 | #include "events/EUpdate.h" | |
32 | ||
33 | #include "osdc/Objecter.h" | |
34 | ||
35 | #include "snap.h" | |
36 | ||
37 | #include "LogSegment.h" | |
38 | ||
39 | #include "common/Clock.h" | |
40 | ||
41 | #include "messages/MLock.h" | |
42 | #include "messages/MClientCaps.h" | |
43 | ||
44 | #include "common/config.h" | |
45 | #include "global/global_context.h" | |
46 | #include "include/assert.h" | |
47 | ||
48 | #include "mds/MDSContinuation.h" | |
49 | #include "mds/InoTable.h" | |
50 | ||
51 | #define dout_context g_ceph_context | |
52 | #define dout_subsys ceph_subsys_mds | |
53 | #undef dout_prefix | |
54 | #define dout_prefix *_dout << "mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") " | |
55 | ||
56 | ||
57 | class CInodeIOContext : public MDSIOContextBase | |
58 | { | |
59 | protected: | |
60 | CInode *in; | |
61 | MDSRank *get_mds() override {return in->mdcache->mds;} | |
62 | public: | |
63 | explicit CInodeIOContext(CInode *in_) : in(in_) { | |
64 | assert(in != NULL); | |
65 | } | |
66 | }; | |
67 | ||
68 | ||
// Static per-lock-type descriptors shared by all CInode instances.
// Each carries the CEPH_LOCK_* id that identifies the lock in wire
// messages and in MDS lock-ordering decisions.
LockType CInode::versionlock_type(CEPH_LOCK_IVERSION);
LockType CInode::authlock_type(CEPH_LOCK_IAUTH);
LockType CInode::linklock_type(CEPH_LOCK_ILINK);
LockType CInode::dirfragtreelock_type(CEPH_LOCK_IDFT);
LockType CInode::filelock_type(CEPH_LOCK_IFILE);
LockType CInode::xattrlock_type(CEPH_LOCK_IXATTR);
LockType CInode::snaplock_type(CEPH_LOCK_ISNAP);
LockType CInode::nestlock_type(CEPH_LOCK_INEST);
LockType CInode::flocklock_type(CEPH_LOCK_IFLOCK);
LockType CInode::policylock_type(CEPH_LOCK_IPOLICY);
79 | ||
80 | //int cinode_pins[CINODE_NUM_PINS]; // counts | |
81 | ostream& CInode::print_db_line_prefix(ostream& out) | |
82 | { | |
83 | return out << ceph_clock_now() << " mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") "; | |
84 | } | |
85 | ||
/*
 * write caps and lock ids
 */
// Table mapping each cap-bearing lock to the cap bits it governs;
// num_cinode_locks is the entry count for iteration.
struct cinode_lock_info_t cinode_lock_info[] = {
  { CEPH_LOCK_IFILE, CEPH_CAP_ANY_FILE_WR },
  { CEPH_LOCK_IAUTH, CEPH_CAP_AUTH_EXCL },
  { CEPH_LOCK_ILINK, CEPH_CAP_LINK_EXCL },
  { CEPH_LOCK_IXATTR, CEPH_CAP_XATTR_EXCL },
};
int num_cinode_locks = sizeof(cinode_lock_info) / sizeof(cinode_lock_info[0]);
96 | ||
97 | ||
98 | ||
// Render a CInode's full debug state as "[inode ...]": path, auth/replica
// status, versions, pins, locks, caps, and reference pins.  Output order is
// relied upon by log readers; do not reorder.
ostream& operator<<(ostream& out, const CInode& in)
{
  string path;
  in.make_path_string(path, true);

  out << "[inode " << in.inode.ino;
  out << " ["
      << (in.is_multiversion() ? "...":"")
      << in.first << "," << in.last << "]";
  out << " " << path << (in.is_dir() ? "/":"");

  if (in.is_auth()) {
    out << " auth";
    if (in.is_replicated())
      out << in.get_replicas();
  } else {
    mds_authority_t a = in.authority();
    out << " rep@" << a.first;
    if (a.second != CDIR_AUTH_UNKNOWN)
      out << "," << a.second;
    out << "." << in.get_replica_nonce();
  }

  if (in.is_symlink())
    out << " symlink='" << in.symlink << "'";
  if (in.is_dir() && !in.dirfragtree.empty())
    out << " " << in.dirfragtree;

  out << " v" << in.get_version();
  if (in.get_projected_version() > in.get_version())
    out << " pv" << in.get_projected_version();

  // auth pins (with the pinner set when compiled in)
  if (in.is_auth_pinned()) {
    out << " ap=" << in.get_num_auth_pins() << "+" << in.get_num_nested_auth_pins();
#ifdef MDS_AUTHPIN_SET
    out << "(" << in.auth_pin_set << ")";
#endif
  }

  if (in.snaprealm)
    out << " snaprealm=" << in.snaprealm;

  // state flags
  if (in.state_test(CInode::STATE_AMBIGUOUSAUTH)) out << " AMBIGAUTH";
  if (in.state_test(CInode::STATE_NEEDSRECOVER)) out << " needsrecover";
  if (in.state_test(CInode::STATE_RECOVERING)) out << " recovering";
  if (in.state_test(CInode::STATE_DIRTYPARENT)) out << " dirtyparent";
  if (in.state_test(CInode::STATE_MISSINGOBJS)) out << " missingobjs";
  if (in.is_freezing_inode()) out << " FREEZING=" << in.auth_pin_freeze_allowance;
  if (in.is_frozen_inode()) out << " FROZEN";
  if (in.is_frozen_auth_pin()) out << " FROZEN_AUTHPIN";

  const inode_t *pi = in.get_projected_inode();
  if (pi->is_truncating())
    out << " truncating(" << pi->truncate_from << " to " << pi->truncate_size << ")";

  if (in.inode.is_dir()) {
    out << " " << in.inode.dirstat;
    if (g_conf->mds_debug_scatterstat && in.is_projected()) {
      const inode_t *pi = in.get_projected_inode();
      out << "->" << pi->dirstat;
    }
  } else {
    out << " s=" << in.inode.size;
    if (in.inode.nlink != 1)
      out << " nl=" << in.inode.nlink;
  }

  // rstat
  out << " " << in.inode.rstat;
  if (!(in.inode.rstat == in.inode.accounted_rstat))
    out << "/" << in.inode.accounted_rstat;
  if (g_conf->mds_debug_scatterstat && in.is_projected()) {
    const inode_t *pi = in.get_projected_inode();
    out << "->" << pi->rstat;
    if (!(pi->rstat == pi->accounted_rstat))
      out << "/" << pi->accounted_rstat;
  }

  if (!in.client_need_snapflush.empty())
    out << " need_snapflush=" << in.client_need_snapflush;


  // locks (only printed when not in the quiescent sync+unlocked state)
  if (!in.authlock.is_sync_and_unlocked())
    out << " " << in.authlock;
  if (!in.linklock.is_sync_and_unlocked())
    out << " " << in.linklock;
  if (in.inode.is_dir()) {
    if (!in.dirfragtreelock.is_sync_and_unlocked())
      out << " " << in.dirfragtreelock;
    if (!in.snaplock.is_sync_and_unlocked())
      out << " " << in.snaplock;
    if (!in.nestlock.is_sync_and_unlocked())
      out << " " << in.nestlock;
    if (!in.policylock.is_sync_and_unlocked())
      out << " " << in.policylock;
  } else {
    if (!in.flocklock.is_sync_and_unlocked())
      out << " " << in.flocklock;
  }
  if (!in.filelock.is_sync_and_unlocked())
    out << " " << in.filelock;
  if (!in.xattrlock.is_sync_and_unlocked())
    out << " " << in.xattrlock;
  if (!in.versionlock.is_sync_and_unlocked())
    out << " " << in.versionlock;

  // hack: spit out crap on which clients have caps
  if (in.inode.client_ranges.size())
    out << " cr=" << in.inode.client_ranges;

  if (!in.get_client_caps().empty()) {
    out << " caps={";
    for (map<client_t,Capability*>::const_iterator it = in.get_client_caps().begin();
         it != in.get_client_caps().end();
         ++it) {
      if (it != in.get_client_caps().begin()) out << ",";
      // pending[/issued]/wanted@last_sent
      out << it->first << "="
          << ccap_string(it->second->pending());
      if (it->second->issued() != it->second->pending())
        out << "/" << ccap_string(it->second->issued());
      out << "/" << ccap_string(it->second->wanted())
          << "@" << it->second->get_last_sent();
    }
    out << "}";
    if (in.get_loner() >= 0 || in.get_wanted_loner() >= 0) {
      out << ",l=" << in.get_loner();
      if (in.get_loner() != in.get_wanted_loner())
        out << "(" << in.get_wanted_loner() << ")";
    }
  }
  if (!in.get_mds_caps_wanted().empty()) {
    out << " mcw={";
    for (compact_map<int,int>::const_iterator p = in.get_mds_caps_wanted().begin();
         p != in.get_mds_caps_wanted().end();
         ++p) {
      if (p != in.get_mds_caps_wanted().begin())
        out << ',';
      out << p->first << '=' << ccap_string(p->second);
    }
    out << '}';
  }

  if (in.get_num_ref()) {
    out << " |";
    in.print_pin_set(out);
  }

  if (in.inode.export_pin != MDS_RANK_NONE) {
    out << " export_pin=" << in.inode.export_pin;
  }

  // object address, then close the bracket
  // (original text "&in;" was garbled to an HTML entity by extraction)
  out << " " << &in;
  out << "]";
  return out;
}
255 | ||
256 | ostream& operator<<(ostream& out, const CInode::scrub_stamp_info_t& si) | |
257 | { | |
258 | out << "{scrub_start_version: " << si.scrub_start_version | |
259 | << ", scrub_start_stamp: " << si.scrub_start_stamp | |
260 | << ", last_scrub_version: " << si.last_scrub_version | |
261 | << ", last_scrub_stamp: " << si.last_scrub_stamp; | |
262 | return out; | |
263 | } | |
264 | ||
265 | ||
266 | ||
267 | void CInode::print(ostream& out) | |
268 | { | |
269 | out << *this; | |
270 | } | |
271 | ||
272 | ||
273 | ||
274 | void CInode::add_need_snapflush(CInode *snapin, snapid_t snapid, client_t client) | |
275 | { | |
276 | dout(10) << "add_need_snapflush client." << client << " snapid " << snapid << " on " << snapin << dendl; | |
277 | ||
278 | if (client_need_snapflush.empty()) { | |
279 | get(CInode::PIN_NEEDSNAPFLUSH); | |
280 | ||
281 | // FIXME: this is non-optimal, as we'll block freezes/migrations for potentially | |
282 | // long periods waiting for clients to flush their snaps. | |
283 | auth_pin(this); // pin head inode... | |
284 | } | |
285 | ||
286 | set<client_t>& clients = client_need_snapflush[snapid]; | |
287 | if (clients.empty()) | |
288 | snapin->auth_pin(this); // ...and pin snapped/old inode! | |
289 | ||
290 | clients.insert(client); | |
291 | } | |
292 | ||
// Clear a pending snapflush for (snapid, client), releasing the auth
// pins taken by add_need_snapflush() as the per-snap set and the whole
// map empty out.  Unknown snapid/client is logged and ignored.
void CInode::remove_need_snapflush(CInode *snapin, snapid_t snapid, client_t client)
{
  dout(10) << "remove_need_snapflush client." << client << " snapid " << snapid << " on " << snapin << dendl;
  compact_map<snapid_t, std::set<client_t> >::iterator p = client_need_snapflush.find(snapid);
  if (p == client_need_snapflush.end()) {
    dout(10) << " snapid not found" << dendl;
    return;
  }
  if (!p->second.count(client)) {
    dout(10) << " client not found" << dendl;
    return;
  }
  p->second.erase(client);
  if (p->second.empty()) {
    client_need_snapflush.erase(p);
    snapin->auth_unpin(this);  // balances the per-snap pin on the old inode

    if (client_need_snapflush.empty()) {
      put(CInode::PIN_NEEDSNAPFLUSH);
      auth_unpin(this);        // balances the head-inode pin
    }
  }
}
316 | ||
// After 'cowin' was cowed out of 'in', transfer pending-snapflush auth
// pins for snapids now covered by cowin from 'in' to 'cowin'; entries
// covered by neither are dropped.  Returns true if cowin picked up any
// pending flushes.
bool CInode::split_need_snapflush(CInode *cowin, CInode *in)
{
  dout(10) << "split_need_snapflush [" << cowin->first << "," << cowin->last << "] for " << *cowin << dendl;
  bool need_flush = false;
  for (compact_map<snapid_t, set<client_t> >::iterator p = client_need_snapflush.lower_bound(cowin->first);
       p != client_need_snapflush.end() && p->first < in->first; ) {
    compact_map<snapid_t, set<client_t> >::iterator q = p;
    ++p;  // advance before q is potentially erased below
    assert(!q->second.empty());
    if (cowin->last >= q->first) {
      cowin->auth_pin(this);  // pin moves to the cowed inode
      need_flush = true;
    } else
      client_need_snapflush.erase(q);
    in->auth_unpin(this);     // either way, 'in' no longer holds this pin
  }
  return need_flush;
}
335 | ||
// Flag this inode's rstat as dirty and queue it on the auth parent
// dirfrag's dirty-rstat list so nestlock scatter propagation picks it
// up.  No-op if already flagged.
void CInode::mark_dirty_rstat()
{
  if (!state_test(STATE_DIRTYRSTAT)) {
    dout(10) << "mark_dirty_rstat" << dendl;
    state_set(STATE_DIRTYRSTAT);
    get(PIN_DIRTYRSTAT);
    CDentry *pdn = get_projected_parent_dn();
    if (pdn->is_auth()) {
      CDir *pdir = pdn->dir;
      pdir->dirty_rstat_inodes.push_back(&dirty_rstat_item);
      mdcache->mds->locker->mark_updated_scatterlock(&pdir->inode->nestlock);
    } else {
      // under cross-MDS rename.
      // DIRTYRSTAT flag will get cleared when rename finishes
      assert(state_test(STATE_AMBIGUOUSAUTH));
    }
  }
}
354 | void CInode::clear_dirty_rstat() | |
355 | { | |
356 | if (state_test(STATE_DIRTYRSTAT)) { | |
357 | dout(10) << "clear_dirty_rstat" << dendl; | |
358 | state_clear(STATE_DIRTYRSTAT); | |
359 | put(PIN_DIRTYRSTAT); | |
360 | dirty_rstat_item.remove_myself(); | |
361 | } | |
362 | } | |
363 | ||
// Push a new projection onto the projected-inode stack, seeded from the
// newest existing projection (or the live inode).  If px is non-NULL the
// current (projected) xattrs are copied into it and the projection also
// carries projected xattrs.  Returns the mutable projected inode_t; the
// caller commits it later via pop_and_dirty_projected_inode().
inode_t *CInode::project_inode(map<string,bufferptr> *px)
{
  if (projected_nodes.empty()) {
    projected_nodes.push_back(new projected_inode_t(new inode_t(inode)));
    if (px)
      *px = xattrs;
  } else {
    projected_nodes.push_back(new projected_inode_t(
        new inode_t(*projected_nodes.back()->inode)));
    if (px)
      *px = *get_projected_xattrs();
  }

  projected_inode_t &pi = *projected_nodes.back();

  if (px) {
    pi.xattrs = px;
    ++num_projected_xattrs;
  }

  // fold pending scrub results into the projection before it is journaled
  if (scrub_infop && scrub_infop->last_scrub_dirty) {
    pi.inode->last_scrub_stamp = scrub_infop->last_scrub_stamp;
    pi.inode->last_scrub_version = scrub_infop->last_scrub_version;
    scrub_infop->last_scrub_dirty = false;
    scrub_maybe_delete_info();
  }
  dout(15) << "project_inode " << pi.inode << dendl;
  return pi.inode;
}
393 | ||
// Commit the oldest projection: mark the inode dirty in log segment ls,
// copy the projected state into the live inode (and xattrs/snaprealm if
// projected), then free the projection.
void CInode::pop_and_dirty_projected_inode(LogSegment *ls)
{
  assert(!projected_nodes.empty());
  dout(15) << "pop_and_dirty_projected_inode " << projected_nodes.front()->inode
           << " v" << projected_nodes.front()->inode->version << dendl;
  int64_t old_pool = inode.layout.pool_id;

  mark_dirty(projected_nodes.front()->inode->version, ls);
  inode = *projected_nodes.front()->inode;

  // a data-pool change means the backtrace must also be updated in the old pool
  if (inode.is_backtrace_updated())
    _mark_dirty_parent(ls, old_pool != inode.layout.pool_id);

  map<string,bufferptr> *px = projected_nodes.front()->xattrs;
  if (px) {
    --num_projected_xattrs;
    xattrs = *px;
    delete px;
  }

  if (projected_nodes.front()->snapnode) {
    pop_projected_snaprealm(projected_nodes.front()->snapnode);
    --num_projected_srnodes;
  }

  delete projected_nodes.front()->inode;
  delete projected_nodes.front();

  projected_nodes.pop_front();
}
424 | ||
// Project a snaprealm node onto the newest inode projection: copy the
// current (projected) srnode if one exists, otherwise create a fresh one
// born at 'snapid'.  Caller must have already called project_inode().
sr_t *CInode::project_snaprealm(snapid_t snapid)
{
  sr_t *cur_srnode = get_projected_srnode();
  sr_t *new_srnode;

  if (cur_srnode) {
    new_srnode = new sr_t(*cur_srnode);
  } else {
    new_srnode = new sr_t();
    new_srnode->created = snapid;
    new_srnode->current_parent_since = get_oldest_snap();
  }
  dout(10) << "project_snaprealm " << new_srnode << dendl;
  projected_nodes.back()->snapnode = new_srnode;
  ++num_projected_srnodes;
  return new_srnode;
}
442 | ||
/* if newparent != parent, add parent to past_parents
 if parent DNE, we need to find what the parent actually is and fill that in */
void CInode::project_past_snaprealm_parent(SnapRealm *newparent)
{
  sr_t *new_snap = project_snaprealm();
  SnapRealm *oldparent;
  if (!snaprealm) {
    // no realm open on this inode yet: inherit from the nearest ancestor realm
    oldparent = find_snaprealm();
    new_snap->seq = oldparent->get_newest_seq();
  }
  else
    oldparent = snaprealm->parent;

  if (newparent != oldparent) {
    snapid_t oldparentseq = oldparent->get_newest_seq();
    if (oldparentseq + 1 > new_snap->current_parent_since) {
      // remember the old parent for snapids [current_parent_since, oldparentseq]
      new_snap->past_parents[oldparentseq].ino = oldparent->inode->ino();
      new_snap->past_parents[oldparentseq].first = new_snap->current_parent_since;
    }
    new_snap->current_parent_since = MAX(oldparentseq, newparent->get_last_created()) + 1;
  }
}
465 | ||
// Install a previously projected srnode into the live snaprealm,
// (re)opening the realm and its past parents as needed.  Consumes
// (deletes) next_snaprealm.
void CInode::pop_projected_snaprealm(sr_t *next_snaprealm)
{
  assert(next_snaprealm);
  dout(10) << "pop_projected_snaprealm " << next_snaprealm
           << " seq" << next_snaprealm->seq << dendl;
  bool invalidate_cached_snaps = false;
  if (!snaprealm) {
    open_snaprealm();
  } else if (next_snaprealm->past_parents.size() !=
             snaprealm->srnode.past_parents.size()) {
    // past-parent set changed: cached snap list is stale
    invalidate_cached_snaps = true;
    // re-open past parents
    snaprealm->_close_parents();

    dout(10) << " realm " << *snaprealm << " past_parents " << snaprealm->srnode.past_parents
             << " -> " << next_snaprealm->past_parents << dendl;
  }
  snaprealm->srnode = *next_snaprealm;
  delete next_snaprealm;

  // we should be able to open these up (or have them already be open).
  bool ok = snaprealm->_open_parents(NULL);
  assert(ok);

  if (invalidate_cached_snaps)
    snaprealm->invalidate_cached_snaps();

  if (snaprealm->parent)
    dout(10) << " realm " << *snaprealm << " parent " << *snaprealm->parent << dendl;
}
496 | ||
497 | ||
498 | // ====== CInode ======= | |
499 | ||
500 | // dirfrags | |
501 | ||
502 | __u32 InodeStoreBase::hash_dentry_name(const string &dn) | |
503 | { | |
504 | int which = inode.dir_layout.dl_dir_hash; | |
505 | if (!which) | |
506 | which = CEPH_STR_HASH_LINUX; | |
507 | assert(ceph_str_hash_valid(which)); | |
508 | return ceph_str_hash(which, dn.data(), dn.length()); | |
509 | } | |
510 | ||
511 | frag_t InodeStoreBase::pick_dirfrag(const string& dn) | |
512 | { | |
513 | if (dirfragtree.empty()) | |
514 | return frag_t(); // avoid the string hash if we can. | |
515 | ||
516 | __u32 h = hash_dentry_name(dn); | |
517 | return dirfragtree[h]; | |
518 | } | |
519 | ||
// Collect every open dirfrag contained in fragment 'fg' into ls.
// Returns true only when the open dirfrags fully cover 'fg'.
bool CInode::get_dirfrags_under(frag_t fg, list<CDir*>& ls)
{
  bool all = true;
  // fast path: walk the fragtree leaves under fg
  list<frag_t> fglist;
  dirfragtree.get_leaves_under(fg, fglist);
  for (list<frag_t>::iterator p = fglist.begin(); p != fglist.end(); ++p)
    if (dirfrags.count(*p))
      ls.push_back(dirfrags[*p]);
    else
      all = false;

  if (all)
    return all;

  // slow path: open dirfrags can disagree with the fragtree (e.g. mid
  // split/merge); build a temporary tree forced to the shapes we have open
  fragtree_t tmpdft;
  tmpdft.force_to_leaf(g_ceph_context, fg);
  for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); p != dirfrags.end(); ++p) {
    tmpdft.force_to_leaf(g_ceph_context, p->first);
    if (fg.contains(p->first) && !dirfragtree.is_leaf(p->first))
      ls.push_back(p->second);
  }

  all = true;
  // NOTE(review): fglist is not cleared before this second get_leaves_under;
  // leftover entries from the first pass get re-checked — looks benign since
  // a missing entry already implied incomplete coverage, but confirm.
  tmpdft.get_leaves_under(fg, fglist);
  for (list<frag_t>::iterator p = fglist.begin(); p != fglist.end(); ++p)
    if (!dirfrags.count(*p)) {
      all = false;
      break;
    }

  return all;
}
552 | ||
553 | void CInode::verify_dirfrags() | |
554 | { | |
555 | bool bad = false; | |
556 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); p != dirfrags.end(); ++p) { | |
557 | if (!dirfragtree.is_leaf(p->first)) { | |
558 | dout(0) << "have open dirfrag " << p->first << " but not leaf in " << dirfragtree | |
559 | << ": " << *p->second << dendl; | |
560 | bad = true; | |
561 | } | |
562 | } | |
563 | assert(!bad); | |
564 | } | |
565 | ||
566 | void CInode::force_dirfrags() | |
567 | { | |
568 | bool bad = false; | |
569 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); p != dirfrags.end(); ++p) { | |
570 | if (!dirfragtree.is_leaf(p->first)) { | |
571 | dout(0) << "have open dirfrag " << p->first << " but not leaf in " << dirfragtree | |
572 | << ": " << *p->second << dendl; | |
573 | bad = true; | |
574 | } | |
575 | } | |
576 | ||
577 | if (bad) { | |
578 | list<frag_t> leaves; | |
579 | dirfragtree.get_leaves(leaves); | |
580 | for (list<frag_t>::iterator p = leaves.begin(); p != leaves.end(); ++p) | |
581 | mdcache->get_force_dirfrag(dirfrag_t(ino(),*p), true); | |
582 | } | |
583 | ||
584 | verify_dirfrags(); | |
585 | } | |
586 | ||
587 | CDir *CInode::get_approx_dirfrag(frag_t fg) | |
588 | { | |
589 | CDir *dir = get_dirfrag(fg); | |
590 | if (dir) return dir; | |
591 | ||
592 | // find a child? | |
593 | list<CDir*> ls; | |
594 | get_dirfrags_under(fg, ls); | |
595 | if (!ls.empty()) | |
596 | return ls.front(); | |
597 | ||
598 | // try parents? | |
599 | while (fg.bits() > 0) { | |
600 | fg = fg.parent(); | |
601 | dir = get_dirfrag(fg); | |
602 | if (dir) return dir; | |
603 | } | |
604 | return NULL; | |
605 | } | |
606 | ||
607 | void CInode::get_dirfrags(list<CDir*>& ls) | |
608 | { | |
609 | // all dirfrags | |
610 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
611 | p != dirfrags.end(); | |
612 | ++p) | |
613 | ls.push_back(p->second); | |
614 | } | |
615 | void CInode::get_nested_dirfrags(list<CDir*>& ls) | |
616 | { | |
617 | // dirfrags in same subtree | |
618 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
619 | p != dirfrags.end(); | |
620 | ++p) | |
621 | if (!p->second->is_subtree_root()) | |
622 | ls.push_back(p->second); | |
623 | } | |
624 | void CInode::get_subtree_dirfrags(list<CDir*>& ls) | |
625 | { | |
626 | // dirfrags that are roots of new subtrees | |
627 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
628 | p != dirfrags.end(); | |
629 | ++p) | |
630 | if (p->second->is_subtree_root()) | |
631 | ls.push_back(p->second); | |
632 | } | |
633 | ||
634 | ||
635 | CDir *CInode::get_or_open_dirfrag(MDCache *mdcache, frag_t fg) | |
636 | { | |
637 | assert(is_dir()); | |
638 | ||
639 | // have it? | |
640 | CDir *dir = get_dirfrag(fg); | |
641 | if (!dir) { | |
642 | // create it. | |
643 | assert(is_auth() || mdcache->mds->is_any_replay()); | |
644 | dir = new CDir(this, fg, mdcache, is_auth()); | |
645 | add_dirfrag(dir); | |
646 | } | |
647 | return dir; | |
648 | } | |
649 | ||
650 | CDir *CInode::add_dirfrag(CDir *dir) | |
651 | { | |
652 | assert(dirfrags.count(dir->dirfrag().frag) == 0); | |
653 | dirfrags[dir->dirfrag().frag] = dir; | |
654 | ||
655 | if (stickydir_ref > 0) { | |
656 | dir->state_set(CDir::STATE_STICKY); | |
657 | dir->get(CDir::PIN_STICKY); | |
658 | } | |
659 | ||
660 | maybe_export_pin(); | |
661 | ||
662 | return dir; | |
663 | } | |
664 | ||
// Tear down one open dirfrag: drop null dentries, clear its dirty flag,
// release any sticky pin, then delete it.  The dirfrag must reach zero
// refs or the assert fires.
void CInode::close_dirfrag(frag_t fg)
{
  dout(14) << "close_dirfrag " << fg << dendl;
  assert(dirfrags.count(fg));

  CDir *dir = dirfrags[fg];
  dir->remove_null_dentries();

  // clear dirty flag
  if (dir->is_dirty())
    dir->mark_clean();

  if (stickydir_ref > 0) {
    dir->state_clear(CDir::STATE_STICKY);
    dir->put(CDir::PIN_STICKY);
  }

  // dump any remaining dentries, for debugging purposes
  for (CDir::map_t::iterator p = dir->items.begin();
       p != dir->items.end();
       ++p)
    dout(14) << "close_dirfrag LEFTOVER dn " << *p->second << dendl;

  assert(dir->get_num_ref() == 0);
  delete dir;
  dirfrags.erase(fg);
}
692 | ||
693 | void CInode::close_dirfrags() | |
694 | { | |
695 | while (!dirfrags.empty()) | |
696 | close_dirfrag(dirfrags.begin()->first); | |
697 | } | |
698 | ||
699 | bool CInode::has_subtree_root_dirfrag(int auth) | |
700 | { | |
701 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
702 | p != dirfrags.end(); | |
703 | ++p) | |
704 | if (p->second->is_subtree_root() && | |
705 | (auth == -1 || p->second->dir_auth.first == auth)) | |
706 | return true; | |
707 | return false; | |
708 | } | |
709 | ||
710 | bool CInode::has_subtree_or_exporting_dirfrag() | |
711 | { | |
712 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
713 | p != dirfrags.end(); | |
714 | ++p) | |
715 | if (p->second->is_subtree_root() || | |
716 | p->second->state_test(CDir::STATE_EXPORTING)) | |
717 | return true; | |
718 | return false; | |
719 | } | |
720 | ||
721 | void CInode::get_stickydirs() | |
722 | { | |
723 | if (stickydir_ref == 0) { | |
724 | get(PIN_STICKYDIRS); | |
725 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
726 | p != dirfrags.end(); | |
727 | ++p) { | |
728 | p->second->state_set(CDir::STATE_STICKY); | |
729 | p->second->get(CDir::PIN_STICKY); | |
730 | } | |
731 | } | |
732 | stickydir_ref++; | |
733 | } | |
734 | ||
735 | void CInode::put_stickydirs() | |
736 | { | |
737 | assert(stickydir_ref > 0); | |
738 | stickydir_ref--; | |
739 | if (stickydir_ref == 0) { | |
740 | put(PIN_STICKYDIRS); | |
741 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
742 | p != dirfrags.end(); | |
743 | ++p) { | |
744 | p->second->state_clear(CDir::STATE_STICKY); | |
745 | p->second->put(CDir::PIN_STICKY); | |
746 | } | |
747 | } | |
748 | } | |
749 | ||
750 | ||
751 | ||
752 | ||
753 | ||
754 | // pins | |
755 | ||
756 | void CInode::first_get() | |
757 | { | |
758 | // pin my dentry? | |
759 | if (parent) | |
760 | parent->get(CDentry::PIN_INODEPIN); | |
761 | } | |
762 | ||
763 | void CInode::last_put() | |
764 | { | |
765 | // unpin my dentry? | |
766 | if (parent) | |
767 | parent->put(CDentry::PIN_INODEPIN); | |
768 | } | |
769 | ||
770 | void CInode::_put() | |
771 | { | |
772 | if (get_num_ref() == (int)is_dirty() + (int)is_dirty_parent()) | |
773 | mdcache->maybe_eval_stray(this, true); | |
774 | } | |
775 | ||
776 | void CInode::add_remote_parent(CDentry *p) | |
777 | { | |
778 | if (remote_parents.empty()) | |
779 | get(PIN_REMOTEPARENT); | |
780 | remote_parents.insert(p); | |
781 | } | |
782 | void CInode::remove_remote_parent(CDentry *p) | |
783 | { | |
784 | remote_parents.erase(p); | |
785 | if (remote_parents.empty()) | |
786 | put(PIN_REMOTEPARENT); | |
787 | } | |
788 | ||
789 | ||
790 | ||
791 | ||
792 | CDir *CInode::get_parent_dir() | |
793 | { | |
794 | if (parent) | |
795 | return parent->dir; | |
796 | return NULL; | |
797 | } | |
798 | CDir *CInode::get_projected_parent_dir() | |
799 | { | |
800 | CDentry *p = get_projected_parent_dn(); | |
801 | if (p) | |
802 | return p->dir; | |
803 | return NULL; | |
804 | } | |
805 | CInode *CInode::get_parent_inode() | |
806 | { | |
807 | if (parent) | |
808 | return parent->dir->inode; | |
809 | return NULL; | |
810 | } | |
811 | ||
812 | bool CInode::is_projected_ancestor_of(CInode *other) | |
813 | { | |
814 | while (other) { | |
815 | if (other == this) | |
816 | return true; | |
817 | if (!other->get_projected_parent_dn()) | |
818 | break; | |
819 | other = other->get_projected_parent_dn()->get_dir()->get_inode(); | |
820 | } | |
821 | return false; | |
822 | } | |
823 | ||
824 | /* | |
825 | * Because a non-directory inode may have multiple links, the use_parent | |
826 | * argument allows selecting which parent to use for path construction. This | |
827 | * argument is only meaningful for the final component (i.e. the first of the | |
828 | * nested calls) because directories cannot have multiple hard links. If | |
829 | * use_parent is NULL and projected is true, the primary parent's projected | |
830 | * inode is used all the way up the path chain. Otherwise the primary parent | |
831 | * stable inode is used. | |
832 | */ | |
833 | void CInode::make_path_string(string& s, bool projected, const CDentry *use_parent) const | |
834 | { | |
835 | if (!use_parent) { | |
836 | use_parent = projected ? get_projected_parent_dn() : parent; | |
837 | } | |
838 | ||
839 | if (use_parent) { | |
840 | use_parent->make_path_string(s, projected); | |
841 | } else if (is_root()) { | |
842 | s = ""; | |
843 | } else if (is_mdsdir()) { | |
844 | char t[40]; | |
845 | uint64_t eino(ino()); | |
846 | eino -= MDS_INO_MDSDIR_OFFSET; | |
847 | snprintf(t, sizeof(t), "~mds%" PRId64, eino); | |
848 | s = t; | |
849 | } else { | |
850 | char n[40]; | |
851 | uint64_t eino(ino()); | |
852 | snprintf(n, sizeof(n), "#%" PRIx64, eino); | |
853 | s += n; | |
854 | } | |
855 | } | |
856 | ||
857 | void CInode::make_path(filepath& fp, bool projected) const | |
858 | { | |
859 | const CDentry *use_parent = projected ? get_projected_parent_dn() : parent; | |
860 | if (use_parent) { | |
861 | assert(!is_base()); | |
862 | use_parent->make_path(fp, projected); | |
863 | } else { | |
864 | fp = filepath(ino()); | |
865 | } | |
866 | } | |
867 | ||
868 | void CInode::name_stray_dentry(string& dname) | |
869 | { | |
870 | char s[20]; | |
871 | snprintf(s, sizeof(s), "%llx", (unsigned long long)inode.ino.val); | |
872 | dname = s; | |
873 | } | |
874 | ||
// Reserve the next version for this inode ahead of journaling: delegate
// to the projected parent dentry when linked, otherwise (base inode)
// bump the projected version directly.  Returns the pre-dirtied version.
version_t CInode::pre_dirty()
{
  version_t pv;
  CDentry* _cdentry = get_projected_parent_dn();
  if (_cdentry) {
    pv = _cdentry->pre_dirty(get_projected_version());
    dout(10) << "pre_dirty " << pv << " (current v " << inode.version << ")" << dendl;
  } else {
    assert(is_base());
    pv = get_projected_version() + 1;
  }
  // force update backtrace for old format inode (see inode_t::decode)
  if (inode.backtrace_version == 0 && !projected_nodes.empty()) {
    inode_t *pi = projected_nodes.back()->inode;
    if (pi->backtrace_version == 0)
      pi->update_backtrace(pv);
  }
  return pv;
}
894 | ||
895 | void CInode::_mark_dirty(LogSegment *ls) | |
896 | { | |
897 | if (!state_test(STATE_DIRTY)) { | |
898 | state_set(STATE_DIRTY); | |
899 | get(PIN_DIRTY); | |
900 | assert(ls); | |
901 | } | |
902 | ||
903 | // move myself to this segment's dirty list | |
904 | if (ls) | |
905 | ls->dirty_inodes.push_back(&item_dirty); | |
906 | } | |
907 | ||
// Advance this inode to pre-dirtied version pv, mark it dirty in log
// segment ls, and propagate the dirty mark to the primary dentry.
void CInode::mark_dirty(version_t pv, LogSegment *ls) {

  dout(10) << "mark_dirty " << *this << dendl;

  /*
    NOTE: I may already be dirty, but this fn _still_ needs to be called so that
    the directory is (perhaps newly) dirtied, and so that parent_dir_version is
    updated below.
  */

  // only auth can get dirty.  "dirty" async data in replicas is relative to
  // filelock state, not the dirty flag.
  assert(is_auth());

  // touch my private version
  assert(inode.version < pv);
  inode.version = pv;
  _mark_dirty(ls);

  // mark dentry too
  if (parent)
    parent->mark_dirty(pv, ls);
}
931 | ||
932 | ||
933 | void CInode::mark_clean() | |
934 | { | |
935 | dout(10) << " mark_clean " << *this << dendl; | |
936 | if (state_test(STATE_DIRTY)) { | |
937 | state_clear(STATE_DIRTY); | |
938 | put(PIN_DIRTY); | |
939 | ||
940 | // remove myself from ls dirty list | |
941 | item_dirty.remove_myself(); | |
942 | } | |
943 | } | |
944 | ||
945 | ||
946 | // -------------- | |
947 | // per-inode storage | |
948 | // (currently for root inode only) | |
949 | ||
950 | struct C_IO_Inode_Stored : public CInodeIOContext { | |
951 | version_t version; | |
952 | Context *fin; | |
953 | C_IO_Inode_Stored(CInode *i, version_t v, Context *f) : CInodeIOContext(i), version(v), fin(f) {} | |
954 | void finish(int r) override { | |
955 | in->_stored(r, version, fin); | |
956 | } | |
957 | }; | |
958 | ||
959 | object_t InodeStoreBase::get_object_name(inodeno_t ino, frag_t fg, const char *suffix) | |
960 | { | |
961 | char n[60]; | |
962 | snprintf(n, sizeof(n), "%llx.%08llx%s", (long long unsigned)ino, (long long unsigned)fg, suffix ? suffix : ""); | |
963 | return object_t(n); | |
964 | } | |
965 | ||
966 | void CInode::store(MDSInternalContextBase *fin) | |
967 | { | |
968 | dout(10) << "store " << get_version() << dendl; | |
969 | assert(is_base()); | |
970 | ||
971 | if (snaprealm) | |
972 | purge_stale_snap_data(snaprealm->get_snaps()); | |
973 | ||
974 | // encode | |
975 | bufferlist bl; | |
976 | string magic = CEPH_FS_ONDISK_MAGIC; | |
977 | ::encode(magic, bl); | |
978 | encode_store(bl, mdcache->mds->mdsmap->get_up_features()); | |
979 | ||
980 | // write it. | |
981 | SnapContext snapc; | |
982 | ObjectOperation m; | |
983 | m.write_full(bl); | |
984 | ||
985 | object_t oid = CInode::get_object_name(ino(), frag_t(), ".inode"); | |
986 | object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pool()); | |
987 | ||
988 | Context *newfin = | |
989 | new C_OnFinisher(new C_IO_Inode_Stored(this, get_version(), fin), | |
990 | mdcache->mds->finisher); | |
991 | mdcache->mds->objecter->mutate(oid, oloc, m, snapc, | |
992 | ceph::real_clock::now(), 0, | |
993 | newfin); | |
994 | } | |
995 | ||
996 | void CInode::_stored(int r, version_t v, Context *fin) | |
997 | { | |
998 | if (r < 0) { | |
999 | dout(1) << "store error " << r << " v " << v << " on " << *this << dendl; | |
1000 | mdcache->mds->clog->error() << "failed to store ino " << ino() << " object," | |
1001 | << " errno " << r; | |
1002 | mdcache->mds->handle_write_error(r); | |
1003 | fin->complete(r); | |
1004 | return; | |
1005 | } | |
1006 | ||
1007 | dout(10) << "_stored " << v << " on " << *this << dendl; | |
1008 | if (v == get_projected_version()) | |
1009 | mark_clean(); | |
1010 | ||
1011 | fin->complete(0); | |
1012 | } | |
1013 | ||
1014 | void CInode::flush(MDSInternalContextBase *fin) | |
1015 | { | |
1016 | dout(10) << "flush " << *this << dendl; | |
1017 | assert(is_auth() && can_auth_pin()); | |
1018 | ||
1019 | MDSGatherBuilder gather(g_ceph_context); | |
1020 | ||
1021 | if (is_dirty_parent()) { | |
1022 | store_backtrace(gather.new_sub()); | |
1023 | } | |
1024 | if (is_dirty()) { | |
1025 | if (is_base()) { | |
1026 | store(gather.new_sub()); | |
1027 | } else { | |
1028 | parent->dir->commit(0, gather.new_sub()); | |
1029 | } | |
1030 | } | |
1031 | ||
1032 | if (gather.has_subs()) { | |
1033 | gather.set_finisher(fin); | |
1034 | gather.activate(); | |
1035 | } else { | |
1036 | fin->complete(0); | |
1037 | } | |
1038 | } | |
1039 | ||
1040 | struct C_IO_Inode_Fetched : public CInodeIOContext { | |
1041 | bufferlist bl, bl2; | |
1042 | Context *fin; | |
1043 | C_IO_Inode_Fetched(CInode *i, Context *f) : CInodeIOContext(i), fin(f) {} | |
1044 | void finish(int r) override { | |
1045 | // Ignore 'r', because we fetch from two places, so r is usually ENOENT | |
1046 | in->_fetched(bl, bl2, fin); | |
1047 | } | |
1048 | }; | |
1049 | ||
1050 | void CInode::fetch(MDSInternalContextBase *fin) | |
1051 | { | |
1052 | dout(10) << "fetch" << dendl; | |
1053 | ||
1054 | C_IO_Inode_Fetched *c = new C_IO_Inode_Fetched(this, fin); | |
1055 | C_GatherBuilder gather(g_ceph_context, new C_OnFinisher(c, mdcache->mds->finisher)); | |
1056 | ||
1057 | object_t oid = CInode::get_object_name(ino(), frag_t(), ""); | |
1058 | object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pool()); | |
1059 | ||
1060 | // Old on-disk format: inode stored in xattr of a dirfrag | |
1061 | ObjectOperation rd; | |
1062 | rd.getxattr("inode", &c->bl, NULL); | |
1063 | mdcache->mds->objecter->read(oid, oloc, rd, CEPH_NOSNAP, (bufferlist*)NULL, 0, gather.new_sub()); | |
1064 | ||
1065 | // Current on-disk format: inode stored in a .inode object | |
1066 | object_t oid2 = CInode::get_object_name(ino(), frag_t(), ".inode"); | |
1067 | mdcache->mds->objecter->read(oid2, oloc, 0, 0, CEPH_NOSNAP, &c->bl2, 0, gather.new_sub()); | |
1068 | ||
1069 | gather.activate(); | |
1070 | } | |
1071 | ||
1072 | void CInode::_fetched(bufferlist& bl, bufferlist& bl2, Context *fin) | |
1073 | { | |
1074 | dout(10) << "_fetched got " << bl.length() << " and " << bl2.length() << dendl; | |
1075 | bufferlist::iterator p; | |
1076 | if (bl2.length()) { | |
1077 | p = bl2.begin(); | |
1078 | } else if (bl.length()) { | |
1079 | p = bl.begin(); | |
1080 | } else { | |
1081 | derr << "No data while reading inode 0x" << std::hex << ino() | |
1082 | << std::dec << dendl; | |
1083 | fin->complete(-ENOENT); | |
1084 | return; | |
1085 | } | |
1086 | ||
1087 | // Attempt decode | |
1088 | try { | |
1089 | string magic; | |
1090 | ::decode(magic, p); | |
1091 | dout(10) << " magic is '" << magic << "' (expecting '" | |
1092 | << CEPH_FS_ONDISK_MAGIC << "')" << dendl; | |
1093 | if (magic != CEPH_FS_ONDISK_MAGIC) { | |
1094 | dout(0) << "on disk magic '" << magic << "' != my magic '" << CEPH_FS_ONDISK_MAGIC | |
1095 | << "'" << dendl; | |
1096 | fin->complete(-EINVAL); | |
1097 | } else { | |
1098 | decode_store(p); | |
1099 | dout(10) << "_fetched " << *this << dendl; | |
1100 | fin->complete(0); | |
1101 | } | |
1102 | } catch (buffer::error &err) { | |
1103 | derr << "Corrupt inode 0x" << std::hex << ino() << std::dec | |
1104 | << ": " << err << dendl; | |
1105 | fin->complete(-EINVAL); | |
1106 | return; | |
1107 | } | |
1108 | } | |
1109 | ||
1110 | void CInode::build_backtrace(int64_t pool, inode_backtrace_t& bt) | |
1111 | { | |
1112 | bt.ino = inode.ino; | |
1113 | bt.ancestors.clear(); | |
1114 | bt.pool = pool; | |
1115 | ||
1116 | CInode *in = this; | |
1117 | CDentry *pdn = get_parent_dn(); | |
1118 | while (pdn) { | |
1119 | CInode *diri = pdn->get_dir()->get_inode(); | |
1120 | bt.ancestors.push_back(inode_backpointer_t(diri->ino(), pdn->name, in->inode.version)); | |
1121 | in = diri; | |
1122 | pdn = in->get_parent_dn(); | |
1123 | } | |
1124 | for (compact_set<int64_t>::iterator i = inode.old_pools.begin(); | |
1125 | i != inode.old_pools.end(); | |
1126 | ++i) { | |
1127 | // don't add our own pool id to old_pools to avoid looping (e.g. setlayout 0, 1, 0) | |
1128 | if (*i != pool) | |
1129 | bt.old_pools.insert(*i); | |
1130 | } | |
1131 | } | |
1132 | ||
1133 | struct C_IO_Inode_StoredBacktrace : public CInodeIOContext { | |
1134 | version_t version; | |
1135 | Context *fin; | |
1136 | C_IO_Inode_StoredBacktrace(CInode *i, version_t v, Context *f) : CInodeIOContext(i), version(v), fin(f) {} | |
1137 | void finish(int r) override { | |
1138 | in->_stored_backtrace(r, version, fin); | |
1139 | } | |
1140 | }; | |
1141 | ||
1142 | void CInode::store_backtrace(MDSInternalContextBase *fin, int op_prio) | |
1143 | { | |
1144 | dout(10) << "store_backtrace on " << *this << dendl; | |
1145 | assert(is_dirty_parent()); | |
1146 | ||
1147 | if (op_prio < 0) | |
1148 | op_prio = CEPH_MSG_PRIO_DEFAULT; | |
1149 | ||
1150 | auth_pin(this); | |
1151 | ||
1152 | const int64_t pool = get_backtrace_pool(); | |
1153 | inode_backtrace_t bt; | |
1154 | build_backtrace(pool, bt); | |
1155 | bufferlist parent_bl; | |
1156 | ::encode(bt, parent_bl); | |
1157 | ||
1158 | ObjectOperation op; | |
1159 | op.priority = op_prio; | |
1160 | op.create(false); | |
1161 | op.setxattr("parent", parent_bl); | |
1162 | ||
1163 | bufferlist layout_bl; | |
1164 | ::encode(inode.layout, layout_bl, mdcache->mds->mdsmap->get_up_features()); | |
1165 | op.setxattr("layout", layout_bl); | |
1166 | ||
1167 | SnapContext snapc; | |
1168 | object_t oid = get_object_name(ino(), frag_t(), ""); | |
1169 | object_locator_t oloc(pool); | |
1170 | Context *fin2 = new C_OnFinisher( | |
1171 | new C_IO_Inode_StoredBacktrace(this, inode.backtrace_version, fin), | |
1172 | mdcache->mds->finisher); | |
1173 | ||
1174 | if (!state_test(STATE_DIRTYPOOL) || inode.old_pools.empty()) { | |
1175 | dout(20) << __func__ << ": no dirtypool or no old pools" << dendl; | |
1176 | mdcache->mds->objecter->mutate(oid, oloc, op, snapc, | |
1177 | ceph::real_clock::now(), | |
1178 | 0, fin2); | |
1179 | return; | |
1180 | } | |
1181 | ||
1182 | C_GatherBuilder gather(g_ceph_context, fin2); | |
1183 | mdcache->mds->objecter->mutate(oid, oloc, op, snapc, | |
1184 | ceph::real_clock::now(), | |
1185 | 0, gather.new_sub()); | |
1186 | ||
1187 | // In the case where DIRTYPOOL is set, we update all old pools backtraces | |
1188 | // such that anyone reading them will see the new pool ID in | |
1189 | // inode_backtrace_t::pool and go read everything else from there. | |
1190 | for (compact_set<int64_t>::iterator p = inode.old_pools.begin(); | |
1191 | p != inode.old_pools.end(); | |
1192 | ++p) { | |
1193 | if (*p == pool) | |
1194 | continue; | |
1195 | ||
1196 | dout(20) << __func__ << ": updating old pool " << *p << dendl; | |
1197 | ||
1198 | ObjectOperation op; | |
1199 | op.priority = op_prio; | |
1200 | op.create(false); | |
1201 | op.setxattr("parent", parent_bl); | |
1202 | ||
1203 | object_locator_t oloc(*p); | |
1204 | mdcache->mds->objecter->mutate(oid, oloc, op, snapc, | |
1205 | ceph::real_clock::now(), | |
1206 | 0, gather.new_sub()); | |
1207 | } | |
1208 | gather.activate(); | |
1209 | } | |
1210 | ||
1211 | void CInode::_stored_backtrace(int r, version_t v, Context *fin) | |
1212 | { | |
1213 | if (r == -ENOENT) { | |
1214 | const int64_t pool = get_backtrace_pool(); | |
1215 | bool exists = mdcache->mds->objecter->with_osdmap( | |
1216 | [pool](const OSDMap &osd_map) { | |
1217 | return osd_map.have_pg_pool(pool); | |
1218 | }); | |
1219 | ||
1220 | // This ENOENT is because the pool doesn't exist (the user deleted it | |
1221 | // out from under us), so the backtrace can never be written, so pretend | |
1222 | // to succeed so that the user can proceed to e.g. delete the file. | |
1223 | if (!exists) { | |
1224 | dout(4) << "store_backtrace got ENOENT: a data pool was deleted " | |
1225 | "beneath us!" << dendl; | |
1226 | r = 0; | |
1227 | } | |
1228 | } | |
1229 | ||
1230 | if (r < 0) { | |
1231 | dout(1) << "store backtrace error " << r << " v " << v << dendl; | |
1232 | mdcache->mds->clog->error() << "failed to store backtrace on ino " | |
1233 | << ino() << " object" | |
1234 | << ", pool " << get_backtrace_pool() | |
1235 | << ", errno " << r; | |
1236 | mdcache->mds->handle_write_error(r); | |
1237 | if (fin) | |
1238 | fin->complete(r); | |
1239 | return; | |
1240 | } | |
1241 | ||
1242 | dout(10) << "_stored_backtrace v " << v << dendl; | |
1243 | ||
1244 | auth_unpin(this); | |
1245 | if (v == inode.backtrace_version) | |
1246 | clear_dirty_parent(); | |
1247 | if (fin) | |
1248 | fin->complete(0); | |
1249 | } | |
1250 | ||
1251 | void CInode::fetch_backtrace(Context *fin, bufferlist *backtrace) | |
1252 | { | |
1253 | mdcache->fetch_backtrace(inode.ino, get_backtrace_pool(), *backtrace, fin); | |
1254 | } | |
1255 | ||
1256 | void CInode::_mark_dirty_parent(LogSegment *ls, bool dirty_pool) | |
1257 | { | |
1258 | if (!state_test(STATE_DIRTYPARENT)) { | |
1259 | dout(10) << "mark_dirty_parent" << dendl; | |
1260 | state_set(STATE_DIRTYPARENT); | |
1261 | get(PIN_DIRTYPARENT); | |
1262 | assert(ls); | |
1263 | } | |
1264 | if (dirty_pool) | |
1265 | state_set(STATE_DIRTYPOOL); | |
1266 | if (ls) | |
1267 | ls->dirty_parent_inodes.push_back(&item_dirty_parent); | |
1268 | } | |
1269 | ||
1270 | void CInode::clear_dirty_parent() | |
1271 | { | |
1272 | if (state_test(STATE_DIRTYPARENT)) { | |
1273 | dout(10) << "clear_dirty_parent" << dendl; | |
1274 | state_clear(STATE_DIRTYPARENT); | |
1275 | state_clear(STATE_DIRTYPOOL); | |
1276 | put(PIN_DIRTYPARENT); | |
1277 | item_dirty_parent.remove_myself(); | |
1278 | } | |
1279 | } | |
1280 | ||
1281 | void CInode::verify_diri_backtrace(bufferlist &bl, int err) | |
1282 | { | |
1283 | if (is_base() || is_dirty_parent() || !is_auth()) | |
1284 | return; | |
1285 | ||
1286 | dout(10) << "verify_diri_backtrace" << dendl; | |
1287 | ||
1288 | if (err == 0) { | |
1289 | inode_backtrace_t backtrace; | |
1290 | ::decode(backtrace, bl); | |
1291 | CDentry *pdn = get_parent_dn(); | |
1292 | if (backtrace.ancestors.empty() || | |
1293 | backtrace.ancestors[0].dname != pdn->name || | |
1294 | backtrace.ancestors[0].dirino != pdn->get_dir()->ino()) | |
1295 | err = -EINVAL; | |
1296 | } | |
1297 | ||
1298 | if (err) { | |
1299 | MDSRank *mds = mdcache->mds; | |
1300 | mds->clog->error() << "bad backtrace on dir ino " << ino(); | |
1301 | assert(!"bad backtrace" == (g_conf->mds_verify_backtrace > 1)); | |
1302 | ||
1303 | _mark_dirty_parent(mds->mdlog->get_current_segment(), false); | |
1304 | mds->mdlog->flush(); | |
1305 | } | |
1306 | } | |
1307 | ||
1308 | // ------------------ | |
1309 | // parent dir | |
1310 | ||
1311 | ||
1312 | void InodeStoreBase::encode_bare(bufferlist &bl, uint64_t features, | |
1313 | const bufferlist *snap_blob) const | |
1314 | { | |
1315 | ::encode(inode, bl, features); | |
1316 | if (is_symlink()) | |
1317 | ::encode(symlink, bl); | |
1318 | ::encode(dirfragtree, bl); | |
1319 | ::encode(xattrs, bl); | |
1320 | if (snap_blob) | |
1321 | ::encode(*snap_blob, bl); | |
1322 | else | |
1323 | ::encode(bufferlist(), bl); | |
1324 | ::encode(old_inodes, bl, features); | |
1325 | ::encode(oldest_snap, bl); | |
1326 | ::encode(damage_flags, bl); | |
1327 | } | |
1328 | ||
1329 | void InodeStoreBase::encode(bufferlist &bl, uint64_t features, | |
1330 | const bufferlist *snap_blob) const | |
1331 | { | |
1332 | ENCODE_START(6, 4, bl); | |
1333 | encode_bare(bl, features, snap_blob); | |
1334 | ENCODE_FINISH(bl); | |
1335 | } | |
1336 | ||
1337 | void CInode::encode_store(bufferlist& bl, uint64_t features) | |
1338 | { | |
1339 | bufferlist snap_blob; | |
1340 | encode_snap_blob(snap_blob); | |
1341 | InodeStoreBase::encode(bl, mdcache->mds->mdsmap->get_up_features(), | |
1342 | &snap_blob); | |
1343 | } | |
1344 | ||
1345 | void InodeStoreBase::decode_bare(bufferlist::iterator &bl, | |
1346 | bufferlist& snap_blob, __u8 struct_v) | |
1347 | { | |
1348 | ::decode(inode, bl); | |
1349 | if (is_symlink()) | |
1350 | ::decode(symlink, bl); | |
1351 | ::decode(dirfragtree, bl); | |
1352 | ::decode(xattrs, bl); | |
1353 | ::decode(snap_blob, bl); | |
1354 | ||
1355 | ::decode(old_inodes, bl); | |
1356 | if (struct_v == 2 && inode.is_dir()) { | |
1357 | bool default_layout_exists; | |
1358 | ::decode(default_layout_exists, bl); | |
1359 | if (default_layout_exists) { | |
1360 | ::decode(struct_v, bl); // this was a default_file_layout | |
1361 | ::decode(inode.layout, bl); // but we only care about the layout portion | |
1362 | } | |
1363 | } | |
1364 | ||
1365 | if (struct_v >= 5) { | |
1366 | // InodeStore is embedded in dentries without proper versioning, so | |
1367 | // we consume up to the end of the buffer | |
1368 | if (!bl.end()) { | |
1369 | ::decode(oldest_snap, bl); | |
1370 | } | |
1371 | ||
1372 | if (!bl.end()) { | |
1373 | ::decode(damage_flags, bl); | |
1374 | } | |
1375 | } | |
1376 | } | |
1377 | ||
1378 | ||
1379 | void InodeStoreBase::decode(bufferlist::iterator &bl, bufferlist& snap_blob) | |
1380 | { | |
1381 | DECODE_START_LEGACY_COMPAT_LEN(5, 4, 4, bl); | |
1382 | decode_bare(bl, snap_blob, struct_v); | |
1383 | DECODE_FINISH(bl); | |
1384 | } | |
1385 | ||
1386 | void CInode::decode_store(bufferlist::iterator& bl) | |
1387 | { | |
1388 | bufferlist snap_blob; | |
1389 | InodeStoreBase::decode(bl, snap_blob); | |
1390 | decode_snap_blob(snap_blob); | |
1391 | } | |
1392 | ||
1393 | // ------------------ | |
1394 | // locking | |
1395 | ||
1396 | void CInode::set_object_info(MDSCacheObjectInfo &info) | |
1397 | { | |
1398 | info.ino = ino(); | |
1399 | info.snapid = last; | |
1400 | } | |
1401 | ||
1402 | void CInode::encode_lock_state(int type, bufferlist& bl) | |
1403 | { | |
1404 | ::encode(first, bl); | |
1405 | ||
1406 | switch (type) { | |
1407 | case CEPH_LOCK_IAUTH: | |
1408 | ::encode(inode.version, bl); | |
1409 | ::encode(inode.ctime, bl); | |
1410 | ::encode(inode.mode, bl); | |
1411 | ::encode(inode.uid, bl); | |
1412 | ::encode(inode.gid, bl); | |
1413 | break; | |
1414 | ||
1415 | case CEPH_LOCK_ILINK: | |
1416 | ::encode(inode.version, bl); | |
1417 | ::encode(inode.ctime, bl); | |
1418 | ::encode(inode.nlink, bl); | |
1419 | break; | |
1420 | ||
1421 | case CEPH_LOCK_IDFT: | |
1422 | if (is_auth()) { | |
1423 | ::encode(inode.version, bl); | |
1424 | } else { | |
1425 | // treat flushing as dirty when rejoining cache | |
1426 | bool dirty = dirfragtreelock.is_dirty_or_flushing(); | |
1427 | ::encode(dirty, bl); | |
1428 | } | |
1429 | { | |
1430 | // encode the raw tree | |
1431 | ::encode(dirfragtree, bl); | |
1432 | ||
1433 | // also specify which frags are mine | |
1434 | set<frag_t> myfrags; | |
1435 | list<CDir*> dfls; | |
1436 | get_dirfrags(dfls); | |
1437 | for (list<CDir*>::iterator p = dfls.begin(); p != dfls.end(); ++p) | |
1438 | if ((*p)->is_auth()) { | |
1439 | frag_t fg = (*p)->get_frag(); | |
1440 | myfrags.insert(fg); | |
1441 | } | |
1442 | ::encode(myfrags, bl); | |
1443 | } | |
1444 | break; | |
1445 | ||
1446 | case CEPH_LOCK_IFILE: | |
1447 | if (is_auth()) { | |
1448 | ::encode(inode.version, bl); | |
1449 | ::encode(inode.ctime, bl); | |
1450 | ::encode(inode.mtime, bl); | |
1451 | ::encode(inode.atime, bl); | |
1452 | ::encode(inode.time_warp_seq, bl); | |
1453 | if (!is_dir()) { | |
1454 | ::encode(inode.layout, bl, mdcache->mds->mdsmap->get_up_features()); | |
1455 | ::encode(inode.size, bl); | |
1456 | ::encode(inode.truncate_seq, bl); | |
1457 | ::encode(inode.truncate_size, bl); | |
1458 | ::encode(inode.client_ranges, bl); | |
1459 | ::encode(inode.inline_data, bl); | |
1460 | } | |
1461 | } else { | |
1462 | // treat flushing as dirty when rejoining cache | |
1463 | bool dirty = filelock.is_dirty_or_flushing(); | |
1464 | ::encode(dirty, bl); | |
1465 | } | |
1466 | ||
1467 | { | |
1468 | dout(15) << "encode_lock_state inode.dirstat is " << inode.dirstat << dendl; | |
1469 | ::encode(inode.dirstat, bl); // only meaningful if i am auth. | |
1470 | bufferlist tmp; | |
1471 | __u32 n = 0; | |
1472 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1473 | p != dirfrags.end(); | |
1474 | ++p) { | |
1475 | frag_t fg = p->first; | |
1476 | CDir *dir = p->second; | |
1477 | if (is_auth() || dir->is_auth()) { | |
1478 | fnode_t *pf = dir->get_projected_fnode(); | |
1479 | dout(15) << fg << " " << *dir << dendl; | |
1480 | dout(20) << fg << " fragstat " << pf->fragstat << dendl; | |
1481 | dout(20) << fg << " accounted_fragstat " << pf->accounted_fragstat << dendl; | |
1482 | ::encode(fg, tmp); | |
1483 | ::encode(dir->first, tmp); | |
1484 | ::encode(pf->fragstat, tmp); | |
1485 | ::encode(pf->accounted_fragstat, tmp); | |
1486 | n++; | |
1487 | } | |
1488 | } | |
1489 | ::encode(n, bl); | |
1490 | bl.claim_append(tmp); | |
1491 | } | |
1492 | break; | |
1493 | ||
1494 | case CEPH_LOCK_INEST: | |
1495 | if (is_auth()) { | |
1496 | ::encode(inode.version, bl); | |
1497 | } else { | |
1498 | // treat flushing as dirty when rejoining cache | |
1499 | bool dirty = nestlock.is_dirty_or_flushing(); | |
1500 | ::encode(dirty, bl); | |
1501 | } | |
1502 | { | |
1503 | dout(15) << "encode_lock_state inode.rstat is " << inode.rstat << dendl; | |
1504 | ::encode(inode.rstat, bl); // only meaningful if i am auth. | |
1505 | bufferlist tmp; | |
1506 | __u32 n = 0; | |
1507 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1508 | p != dirfrags.end(); | |
1509 | ++p) { | |
1510 | frag_t fg = p->first; | |
1511 | CDir *dir = p->second; | |
1512 | if (is_auth() || dir->is_auth()) { | |
1513 | fnode_t *pf = dir->get_projected_fnode(); | |
1514 | dout(10) << fg << " " << *dir << dendl; | |
1515 | dout(10) << fg << " " << pf->rstat << dendl; | |
1516 | dout(10) << fg << " " << pf->rstat << dendl; | |
1517 | dout(10) << fg << " " << dir->dirty_old_rstat << dendl; | |
1518 | ::encode(fg, tmp); | |
1519 | ::encode(dir->first, tmp); | |
1520 | ::encode(pf->rstat, tmp); | |
1521 | ::encode(pf->accounted_rstat, tmp); | |
1522 | ::encode(dir->dirty_old_rstat, tmp); | |
1523 | n++; | |
1524 | } | |
1525 | } | |
1526 | ::encode(n, bl); | |
1527 | bl.claim_append(tmp); | |
1528 | } | |
1529 | break; | |
1530 | ||
1531 | case CEPH_LOCK_IXATTR: | |
1532 | ::encode(inode.version, bl); | |
1533 | ::encode(inode.ctime, bl); | |
1534 | ::encode(xattrs, bl); | |
1535 | break; | |
1536 | ||
1537 | case CEPH_LOCK_ISNAP: | |
1538 | ::encode(inode.version, bl); | |
1539 | ::encode(inode.ctime, bl); | |
1540 | encode_snap(bl); | |
1541 | break; | |
1542 | ||
1543 | case CEPH_LOCK_IFLOCK: | |
1544 | ::encode(inode.version, bl); | |
1545 | _encode_file_locks(bl); | |
1546 | break; | |
1547 | ||
1548 | case CEPH_LOCK_IPOLICY: | |
1549 | if (inode.is_dir()) { | |
1550 | ::encode(inode.version, bl); | |
1551 | ::encode(inode.ctime, bl); | |
1552 | ::encode(inode.layout, bl, mdcache->mds->mdsmap->get_up_features()); | |
1553 | ::encode(inode.quota, bl); | |
1554 | ::encode(inode.export_pin, bl); | |
1555 | } | |
1556 | break; | |
1557 | ||
1558 | default: | |
1559 | ceph_abort(); | |
1560 | } | |
1561 | } | |
1562 | ||
1563 | ||
1564 | /* for more info on scatterlocks, see comments by Locker::scatter_writebehind */ | |
1565 | ||
1566 | void CInode::decode_lock_state(int type, bufferlist& bl) | |
1567 | { | |
1568 | bufferlist::iterator p = bl.begin(); | |
1569 | utime_t tm; | |
1570 | ||
1571 | snapid_t newfirst; | |
1572 | ::decode(newfirst, p); | |
1573 | ||
1574 | if (!is_auth() && newfirst != first) { | |
1575 | dout(10) << "decode_lock_state first " << first << " -> " << newfirst << dendl; | |
1576 | assert(newfirst > first); | |
1577 | if (!is_multiversion() && parent) { | |
1578 | assert(parent->first == first); | |
1579 | parent->first = newfirst; | |
1580 | } | |
1581 | first = newfirst; | |
1582 | } | |
1583 | ||
1584 | switch (type) { | |
1585 | case CEPH_LOCK_IAUTH: | |
1586 | ::decode(inode.version, p); | |
1587 | ::decode(tm, p); | |
1588 | if (inode.ctime < tm) inode.ctime = tm; | |
1589 | ::decode(inode.mode, p); | |
1590 | ::decode(inode.uid, p); | |
1591 | ::decode(inode.gid, p); | |
1592 | break; | |
1593 | ||
1594 | case CEPH_LOCK_ILINK: | |
1595 | ::decode(inode.version, p); | |
1596 | ::decode(tm, p); | |
1597 | if (inode.ctime < tm) inode.ctime = tm; | |
1598 | ::decode(inode.nlink, p); | |
1599 | break; | |
1600 | ||
1601 | case CEPH_LOCK_IDFT: | |
1602 | if (is_auth()) { | |
1603 | bool replica_dirty; | |
1604 | ::decode(replica_dirty, p); | |
1605 | if (replica_dirty) { | |
1606 | dout(10) << "decode_lock_state setting dftlock dirty flag" << dendl; | |
1607 | dirfragtreelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1608 | } | |
1609 | } else { | |
1610 | ::decode(inode.version, p); | |
1611 | } | |
1612 | { | |
1613 | fragtree_t temp; | |
1614 | ::decode(temp, p); | |
1615 | set<frag_t> authfrags; | |
1616 | ::decode(authfrags, p); | |
1617 | if (is_auth()) { | |
1618 | // auth. believe replica's auth frags only. | |
1619 | for (set<frag_t>::iterator p = authfrags.begin(); p != authfrags.end(); ++p) | |
1620 | if (!dirfragtree.is_leaf(*p)) { | |
1621 | dout(10) << " forcing frag " << *p << " to leaf (split|merge)" << dendl; | |
1622 | dirfragtree.force_to_leaf(g_ceph_context, *p); | |
1623 | dirfragtreelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1624 | } | |
1625 | } else { | |
1626 | // replica. take the new tree, BUT make sure any open | |
1627 | // dirfrags remain leaves (they may have split _after_ this | |
1628 | // dft was scattered, or we may still be be waiting on the | |
1629 | // notify from the auth) | |
1630 | dirfragtree.swap(temp); | |
1631 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1632 | p != dirfrags.end(); | |
1633 | ++p) { | |
1634 | if (!dirfragtree.is_leaf(p->first)) { | |
1635 | dout(10) << " forcing open dirfrag " << p->first << " to leaf (racing with split|merge)" << dendl; | |
1636 | dirfragtree.force_to_leaf(g_ceph_context, p->first); | |
1637 | } | |
1638 | if (p->second->is_auth()) | |
1639 | p->second->state_clear(CDir::STATE_DIRTYDFT); | |
1640 | } | |
1641 | } | |
1642 | if (g_conf->mds_debug_frag) | |
1643 | verify_dirfrags(); | |
1644 | } | |
1645 | break; | |
1646 | ||
1647 | case CEPH_LOCK_IFILE: | |
1648 | if (!is_auth()) { | |
1649 | ::decode(inode.version, p); | |
1650 | ::decode(tm, p); | |
1651 | if (inode.ctime < tm) inode.ctime = tm; | |
1652 | ::decode(inode.mtime, p); | |
1653 | ::decode(inode.atime, p); | |
1654 | ::decode(inode.time_warp_seq, p); | |
1655 | if (!is_dir()) { | |
1656 | ::decode(inode.layout, p); | |
1657 | ::decode(inode.size, p); | |
1658 | ::decode(inode.truncate_seq, p); | |
1659 | ::decode(inode.truncate_size, p); | |
1660 | ::decode(inode.client_ranges, p); | |
1661 | ::decode(inode.inline_data, p); | |
1662 | } | |
1663 | } else { | |
1664 | bool replica_dirty; | |
1665 | ::decode(replica_dirty, p); | |
1666 | if (replica_dirty) { | |
1667 | dout(10) << "decode_lock_state setting filelock dirty flag" << dendl; | |
1668 | filelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1669 | } | |
1670 | } | |
1671 | { | |
1672 | frag_info_t dirstat; | |
1673 | ::decode(dirstat, p); | |
1674 | if (!is_auth()) { | |
1675 | dout(10) << " taking inode dirstat " << dirstat << " for " << *this << dendl; | |
1676 | inode.dirstat = dirstat; // take inode summation if replica | |
1677 | } | |
1678 | __u32 n; | |
1679 | ::decode(n, p); | |
1680 | dout(10) << " ...got " << n << " fragstats on " << *this << dendl; | |
1681 | while (n--) { | |
1682 | frag_t fg; | |
1683 | snapid_t fgfirst; | |
1684 | frag_info_t fragstat; | |
1685 | frag_info_t accounted_fragstat; | |
1686 | ::decode(fg, p); | |
1687 | ::decode(fgfirst, p); | |
1688 | ::decode(fragstat, p); | |
1689 | ::decode(accounted_fragstat, p); | |
1690 | dout(10) << fg << " [" << fgfirst << ",head] " << dendl; | |
1691 | dout(10) << fg << " fragstat " << fragstat << dendl; | |
1692 | dout(20) << fg << " accounted_fragstat " << accounted_fragstat << dendl; | |
1693 | ||
1694 | CDir *dir = get_dirfrag(fg); | |
1695 | if (is_auth()) { | |
1696 | assert(dir); // i am auth; i had better have this dir open | |
1697 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1698 | << " on " << *dir << dendl; | |
1699 | dir->first = fgfirst; | |
1700 | dir->fnode.fragstat = fragstat; | |
1701 | dir->fnode.accounted_fragstat = accounted_fragstat; | |
1702 | dir->first = fgfirst; | |
1703 | if (!(fragstat == accounted_fragstat)) { | |
1704 | dout(10) << fg << " setting filelock updated flag" << dendl; | |
1705 | filelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1706 | } | |
1707 | } else { | |
1708 | if (dir && dir->is_auth()) { | |
1709 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1710 | << " on " << *dir << dendl; | |
1711 | dir->first = fgfirst; | |
1712 | fnode_t *pf = dir->get_projected_fnode(); | |
1713 | finish_scatter_update(&filelock, dir, | |
1714 | inode.dirstat.version, pf->accounted_fragstat.version); | |
1715 | } | |
1716 | } | |
1717 | } | |
1718 | } | |
1719 | break; | |
1720 | ||
1721 | case CEPH_LOCK_INEST: | |
1722 | if (is_auth()) { | |
1723 | bool replica_dirty; | |
1724 | ::decode(replica_dirty, p); | |
1725 | if (replica_dirty) { | |
1726 | dout(10) << "decode_lock_state setting nestlock dirty flag" << dendl; | |
1727 | nestlock.mark_dirty(); // ok bc we're auth and caller will handle | |
1728 | } | |
1729 | } else { | |
1730 | ::decode(inode.version, p); | |
1731 | } | |
1732 | { | |
1733 | nest_info_t rstat; | |
1734 | ::decode(rstat, p); | |
1735 | if (!is_auth()) { | |
1736 | dout(10) << " taking inode rstat " << rstat << " for " << *this << dendl; | |
1737 | inode.rstat = rstat; // take inode summation if replica | |
1738 | } | |
1739 | __u32 n; | |
1740 | ::decode(n, p); | |
1741 | while (n--) { | |
1742 | frag_t fg; | |
1743 | snapid_t fgfirst; | |
1744 | nest_info_t rstat; | |
1745 | nest_info_t accounted_rstat; | |
1746 | compact_map<snapid_t,old_rstat_t> dirty_old_rstat; | |
1747 | ::decode(fg, p); | |
1748 | ::decode(fgfirst, p); | |
1749 | ::decode(rstat, p); | |
1750 | ::decode(accounted_rstat, p); | |
1751 | ::decode(dirty_old_rstat, p); | |
1752 | dout(10) << fg << " [" << fgfirst << ",head]" << dendl; | |
1753 | dout(10) << fg << " rstat " << rstat << dendl; | |
1754 | dout(10) << fg << " accounted_rstat " << accounted_rstat << dendl; | |
1755 | dout(10) << fg << " dirty_old_rstat " << dirty_old_rstat << dendl; | |
1756 | ||
1757 | CDir *dir = get_dirfrag(fg); | |
1758 | if (is_auth()) { | |
1759 | assert(dir); // i am auth; i had better have this dir open | |
1760 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1761 | << " on " << *dir << dendl; | |
1762 | dir->first = fgfirst; | |
1763 | dir->fnode.rstat = rstat; | |
1764 | dir->fnode.accounted_rstat = accounted_rstat; | |
1765 | dir->dirty_old_rstat.swap(dirty_old_rstat); | |
1766 | if (!(rstat == accounted_rstat) || !dir->dirty_old_rstat.empty()) { | |
1767 | dout(10) << fg << " setting nestlock updated flag" << dendl; | |
1768 | nestlock.mark_dirty(); // ok bc we're auth and caller will handle | |
1769 | } | |
1770 | } else { | |
1771 | if (dir && dir->is_auth()) { | |
1772 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1773 | << " on " << *dir << dendl; | |
1774 | dir->first = fgfirst; | |
1775 | fnode_t *pf = dir->get_projected_fnode(); | |
1776 | finish_scatter_update(&nestlock, dir, | |
1777 | inode.rstat.version, pf->accounted_rstat.version); | |
1778 | } | |
1779 | } | |
1780 | } | |
1781 | } | |
1782 | break; | |
1783 | ||
1784 | case CEPH_LOCK_IXATTR: | |
1785 | ::decode(inode.version, p); | |
1786 | ::decode(tm, p); | |
1787 | if (inode.ctime < tm) inode.ctime = tm; | |
1788 | ::decode(xattrs, p); | |
1789 | break; | |
1790 | ||
1791 | case CEPH_LOCK_ISNAP: | |
1792 | { | |
1793 | ::decode(inode.version, p); | |
1794 | ::decode(tm, p); | |
1795 | if (inode.ctime < tm) inode.ctime = tm; | |
1796 | snapid_t seq = 0; | |
1797 | if (snaprealm) | |
1798 | seq = snaprealm->srnode.seq; | |
1799 | decode_snap(p); | |
1800 | if (snaprealm && snaprealm->srnode.seq != seq) | |
1801 | mdcache->do_realm_invalidate_and_update_notify(this, seq ? CEPH_SNAP_OP_UPDATE:CEPH_SNAP_OP_SPLIT); | |
1802 | } | |
1803 | break; | |
1804 | ||
1805 | case CEPH_LOCK_IFLOCK: | |
1806 | ::decode(inode.version, p); | |
1807 | _decode_file_locks(p); | |
1808 | break; | |
1809 | ||
1810 | case CEPH_LOCK_IPOLICY: | |
1811 | if (inode.is_dir()) { | |
1812 | ::decode(inode.version, p); | |
1813 | ::decode(tm, p); | |
1814 | if (inode.ctime < tm) inode.ctime = tm; | |
1815 | ::decode(inode.layout, p); | |
1816 | ::decode(inode.quota, p); | |
31f18b77 | 1817 | mds_rank_t old_pin = inode.export_pin; |
7c673cae | 1818 | ::decode(inode.export_pin, p); |
31f18b77 | 1819 | maybe_export_pin(old_pin != inode.export_pin); |
7c673cae FG |
1820 | } |
1821 | break; | |
1822 | ||
1823 | default: | |
1824 | ceph_abort(); | |
1825 | } | |
1826 | } | |
1827 | ||
1828 | ||
1829 | bool CInode::is_dirty_scattered() | |
1830 | { | |
1831 | return | |
1832 | filelock.is_dirty_or_flushing() || | |
1833 | nestlock.is_dirty_or_flushing() || | |
1834 | dirfragtreelock.is_dirty_or_flushing(); | |
1835 | } | |
1836 | ||
// Drop the dirty flag on all three scatterlocks (file, nest, dirfragtree)
// at once.  Used when scatter state is being discarded wholesale.
void CInode::clear_scatter_dirty()
{
  filelock.remove_dirty();
  nestlock.remove_dirty();
  dirfragtreelock.remove_dirty();
}
1843 | ||
1844 | void CInode::clear_dirty_scattered(int type) | |
1845 | { | |
1846 | dout(10) << "clear_dirty_scattered " << type << " on " << *this << dendl; | |
1847 | switch (type) { | |
1848 | case CEPH_LOCK_IFILE: | |
1849 | item_dirty_dirfrag_dir.remove_myself(); | |
1850 | break; | |
1851 | ||
1852 | case CEPH_LOCK_INEST: | |
1853 | item_dirty_dirfrag_nest.remove_myself(); | |
1854 | break; | |
1855 | ||
1856 | case CEPH_LOCK_IDFT: | |
1857 | item_dirty_dirfrag_dirfragtree.remove_myself(); | |
1858 | break; | |
1859 | ||
1860 | default: | |
1861 | ceph_abort(); | |
1862 | } | |
1863 | } | |
1864 | ||
1865 | ||
1866 | /* | |
1867 | * when we initially scatter a lock, we need to check if any of the dirfrags | |
1868 | * have out of date accounted_rstat/fragstat. if so, mark the lock stale. | |
1869 | */ | |
1870 | /* for more info on scatterlocks, see comments by Locker::scatter_writebehind */ | |
1871 | void CInode::start_scatter(ScatterLock *lock) | |
1872 | { | |
1873 | dout(10) << "start_scatter " << *lock << " on " << *this << dendl; | |
1874 | assert(is_auth()); | |
1875 | inode_t *pi = get_projected_inode(); | |
1876 | ||
1877 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1878 | p != dirfrags.end(); | |
1879 | ++p) { | |
1880 | frag_t fg = p->first; | |
1881 | CDir *dir = p->second; | |
1882 | fnode_t *pf = dir->get_projected_fnode(); | |
1883 | dout(20) << fg << " " << *dir << dendl; | |
1884 | ||
1885 | if (!dir->is_auth()) | |
1886 | continue; | |
1887 | ||
1888 | switch (lock->get_type()) { | |
1889 | case CEPH_LOCK_IFILE: | |
1890 | finish_scatter_update(lock, dir, pi->dirstat.version, pf->accounted_fragstat.version); | |
1891 | break; | |
1892 | ||
1893 | case CEPH_LOCK_INEST: | |
1894 | finish_scatter_update(lock, dir, pi->rstat.version, pf->accounted_rstat.version); | |
1895 | break; | |
1896 | ||
1897 | case CEPH_LOCK_IDFT: | |
1898 | dir->state_clear(CDir::STATE_DIRTYDFT); | |
1899 | break; | |
1900 | } | |
1901 | } | |
1902 | } | |
1903 | ||
1904 | ||
// Journal-commit callback for finish_scatter_update(): once the EUpdate
// carrying the refreshed accounted fragstat/rstat is safely logged, apply
// the projected fnode and tear down the mutation.
class C_Inode_FragUpdate : public MDSLogContextBase {
protected:
  CInode *in;       // inode whose dirfrag stats were journaled
  CDir *dir;        // the dirfrag that was updated
  MutationRef mut;  // mutation holding the projected fnode + auth pin
  MDSRank *get_mds() override {return in->mdcache->mds;}
  void finish(int r) override {
    in->_finish_frag_update(dir, mut);
  }

public:
  C_Inode_FragUpdate(CInode *i, CDir *d, MutationRef& m) : in(i), dir(d), mut(m) {}
};
1918 | ||
// Bring one dirfrag's accounted scatter stat up to the inode's current
// version by journaling an EUpdate.  If the frag is frozen or not loaded
// we cannot touch it, so the scatterlock is simply left stale for this
// frag.  Called per-frag from start_scatter().
void CInode::finish_scatter_update(ScatterLock *lock, CDir *dir,
				   version_t inode_version, version_t dir_accounted_version)
{
  frag_t fg = dir->get_frag();
  assert(dir->is_auth());

  if (dir->is_frozen()) {
    dout(10) << "finish_scatter_update " << fg << " frozen, marking " << *lock << " stale " << *dir << dendl;
  } else if (dir->get_version() == 0) {
    dout(10) << "finish_scatter_update " << fg << " not loaded, marking " << *lock << " stale " << *dir << dendl;
  } else {
    if (dir_accounted_version != inode_version) {
      dout(10) << "finish_scatter_update " << fg << " journaling accounted scatterstat update v" << inode_version << dendl;

      MDLog *mdlog = mdcache->mds->mdlog;
      MutationRef mut(new MutationImpl());
      mut->ls = mdlog->get_current_segment();

      inode_t *pi = get_projected_inode();
      fnode_t *pf = dir->project_fnode();

      const char *ename = 0;
      switch (lock->get_type()) {
      case CEPH_LOCK_IFILE:
	// stamp the frag with the inode's dirstat version and mark the
	// current fragstat as fully accounted
	pf->fragstat.version = pi->dirstat.version;
	pf->accounted_fragstat = pf->fragstat;
	ename = "lock ifile accounted scatter stat update";
	break;
      case CEPH_LOCK_INEST:
	pf->rstat.version = pi->rstat.version;
	pf->accounted_rstat = pf->rstat;
	ename = "lock inest accounted scatter stat update";

	// replica in MIX: fold child inodes' dirty rstats into this frag
	// before the accounted values are journaled
	if (!is_auth() && lock->get_state() == LOCK_MIX) {
	  dout(10) << "finish_scatter_update try to assimilate dirty rstat on "
	    << *dir << dendl;
	  dir->assimilate_dirty_rstat_inodes();
	}

	break;
      default:
	ceph_abort();
      }

      pf->version = dir->pre_dirty();
      mut->add_projected_fnode(dir);

      EUpdate *le = new EUpdate(mdlog, ename);
      mdlog->start_entry(le);
      le->metablob.add_dir_context(dir);
      le->metablob.add_dir(dir, true);

      assert(!dir->is_frozen());
      mut->auth_pin(dir);

      if (lock->get_type() == CEPH_LOCK_INEST &&
	  !is_auth() && lock->get_state() == LOCK_MIX) {
	dout(10) << "finish_scatter_update finish assimilating dirty rstat on "
	  << *dir << dendl;
	dir->assimilate_dirty_rstat_inodes_finish(mut, &le->metablob);

	// if assimilation left an unaccounted delta, keep the nestlock
	// flagged dirty so it gets flushed
	if (!(pf->rstat == pf->accounted_rstat)) {
	  if (mut->wrlocks.count(&nestlock) == 0) {
	    mdcache->mds->locker->wrlock_force(&nestlock, mut);
	  }

	  mdcache->mds->locker->mark_updated_scatterlock(&nestlock);
	  mut->ls->dirty_dirfrag_nest.push_back(&item_dirty_dirfrag_nest);
	}
      }

      mdlog->submit_entry(le, new C_Inode_FragUpdate(this, dir, mut));
    } else {
      dout(10) << "finish_scatter_update " << fg << " accounted " << *lock
	       << " scatter stat unchanged at v" << dir_accounted_version << dendl;
    }
  }
}
1997 | ||
// Completion for C_Inode_FragUpdate: the frag-update journal entry is
// durable, so apply the projected fnode, release the mutation's locks,
// then drop its pins.  The apply/drop/cleanup order is significant.
void CInode::_finish_frag_update(CDir *dir, MutationRef& mut)
{
  dout(10) << "_finish_frag_update on " << *dir << dendl;
  mut->apply();
  mdcache->mds->locker->drop_locks(mut.get());
  mut->cleanup();
}
2005 | ||
2006 | ||
/*
 * when we gather a lock, we need to assimilate dirfrag changes into the inode
 * state.  it's possible we can't update the dirfrag accounted_rstat/fragstat
 * because the frag is auth and frozen, or that the replica couldn't for the same
 * reason.  hopefully it will get updated the next time the lock cycles.
 *
 * we have two dimensions of behavior:
 *  - we may be (auth and !frozen), and able to update, or not.
 *  - the frag may be stale, or not.
 *
 * if the frag is non-stale, we want to assimilate the diff into the
 * inode, regardless of whether it's auth or updateable.
 *
 * if we update the frag, we want to set accounted_fragstat = frag,
 * both if we took the diff or it was stale and we are making it
 * un-stale.
 */
/* for more info on scatterlocks, see comments by Locker::scatter_writebehind */
void CInode::finish_scatter_gather_update(int type)
{
  LogChannelRef clog = mdcache->mds->clog;

  dout(10) << "finish_scatter_gather_update " << type << " on " << *this << dendl;
  assert(is_auth());

  switch (type) {
  case CEPH_LOCK_IFILE:
    {
      fragtree_t tmpdft = dirfragtree;
      struct frag_info_t dirstat;       // sum of all frags' fragstats
      bool dirstat_valid = true;        // false if any frag was unloaded

      // adjust summation
      assert(is_auth());
      inode_t *pi = get_projected_inode();

      bool touched_mtime = false, touched_chattr = false;
      dout(20) << " orig dirstat " << pi->dirstat << dendl;
      pi->dirstat.version++;
      for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
	   p != dirfrags.end();
	   ++p) {
	frag_t fg = p->first;
	CDir *dir = p->second;
	dout(20) << fg << " " << *dir << dendl;

	// can we write the frag's accounted stats back?
	bool update;
	if (dir->get_version() != 0) {
	  update = dir->is_auth() && !dir->is_frozen();
	} else {
	  update = false;
	  dirstat_valid = false;        // unloaded frag => sum unverifiable
	}

	fnode_t *pf = dir->get_projected_fnode();
	if (update)
	  pf = dir->project_fnode();

	// only a frag exactly one version behind contributes a diff
	if (pf->accounted_fragstat.version == pi->dirstat.version - 1) {
	  dout(20) << fg << " fragstat " << pf->fragstat << dendl;
	  dout(20) << fg << " accounted_fragstat " << pf->accounted_fragstat << dendl;
	  pi->dirstat.add_delta(pf->fragstat, pf->accounted_fragstat, &touched_mtime, &touched_chattr);
	} else {
	  dout(20) << fg << " skipping STALE accounted_fragstat " << pf->accounted_fragstat << dendl;
	}

	// clamp corrupt negative counts (and assert in verify mode)
	if (pf->fragstat.nfiles < 0 ||
	    pf->fragstat.nsubdirs < 0) {
	  clog->error() << "bad/negative dir size on "
	      << dir->dirfrag() << " " << pf->fragstat;
	  assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);

	  if (pf->fragstat.nfiles < 0)
	    pf->fragstat.nfiles = 0;
	  if (pf->fragstat.nsubdirs < 0)
	    pf->fragstat.nsubdirs = 0;
	}

	if (update) {
	  pf->accounted_fragstat = pf->fragstat;
	  pf->fragstat.version = pf->accounted_fragstat.version = pi->dirstat.version;
	  dout(10) << fg << " updated accounted_fragstat " << pf->fragstat << " on " << *dir << dendl;
	}

	tmpdft.force_to_leaf(g_ceph_context, fg);
	dirstat.add(pf->fragstat);
      }
      if (touched_mtime)
	pi->mtime = pi->ctime = pi->dirstat.mtime;
      if (touched_chattr)
	pi->change_attr = pi->dirstat.change_attr;
      dout(20) << " final dirstat " << pi->dirstat << dendl;

      // cross-check: inode dirstat should equal the sum over all frags
      if (dirstat_valid && !dirstat.same_sums(pi->dirstat)) {
	list<frag_t> ls;
	tmpdft.get_leaves_under(frag_t(), ls);
	for (list<frag_t>::iterator p = ls.begin(); p != ls.end(); ++p)
	  if (!dirfrags.count(*p)) {
	    dirstat_valid = false;      // a leaf frag wasn't in cache
	    break;
	  }
	if (dirstat_valid) {
	  if (state_test(CInode::STATE_REPAIRSTATS)) {
	    dout(20) << " dirstat mismatch, fixing" << dendl;
	  } else {
	    clog->error() << "unmatched fragstat on " << ino() << ", inode has "
			  << pi->dirstat << ", dirfrags have " << dirstat;
	    assert(!"unmatched fragstat" == g_conf->mds_verify_scatter);
	  }
	  // trust the dirfrags for now
	  version_t v = pi->dirstat.version;
	  if (pi->dirstat.mtime > dirstat.mtime)
	    dirstat.mtime = pi->dirstat.mtime;
	  if (pi->dirstat.change_attr > dirstat.change_attr)
	    dirstat.change_attr = pi->dirstat.change_attr;
	  pi->dirstat = dirstat;
	  pi->dirstat.version = v;
	}
      }

      if (pi->dirstat.nfiles < 0 ||
	  pi->dirstat.nsubdirs < 0) {
	clog->error() << "bad/negative fragstat on " << ino()
	    << ", inode has " << pi->dirstat;
	assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);

	if (pi->dirstat.nfiles < 0)
	  pi->dirstat.nfiles = 0;
	if (pi->dirstat.nsubdirs < 0)
	  pi->dirstat.nsubdirs = 0;
      }
    }
    break;

  case CEPH_LOCK_INEST:
    {
      fragtree_t tmpdft = dirfragtree;
      nest_info_t rstat;
      rstat.rsubdirs = 1;               // count this directory itself
      bool rstat_valid = true;

      // adjust summation
      assert(is_auth());
      inode_t *pi = get_projected_inode();
      dout(20) << " orig rstat " << pi->rstat << dendl;
      pi->rstat.version++;
      for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
	   p != dirfrags.end();
	   ++p) {
	frag_t fg = p->first;
	CDir *dir = p->second;
	dout(20) << fg << " " << *dir << dendl;

	bool update;
	if (dir->get_version() != 0) {
	  update = dir->is_auth() && !dir->is_frozen();
	} else {
	  update = false;
	  rstat_valid = false;
	}

	fnode_t *pf = dir->get_projected_fnode();
	if (update)
	  pf = dir->project_fnode();

	if (pf->accounted_rstat.version == pi->rstat.version-1) {
	  // only pull this frag's dirty rstat inodes into the frag if
	  // the frag is non-stale and updateable.  if it's stale,
	  // that info will just get thrown out!
	  if (update)
	    dir->assimilate_dirty_rstat_inodes();

	  dout(20) << fg << " rstat " << pf->rstat << dendl;
	  dout(20) << fg << " accounted_rstat " << pf->accounted_rstat << dendl;
	  dout(20) << fg << " dirty_old_rstat " << dir->dirty_old_rstat << dendl;
	  mdcache->project_rstat_frag_to_inode(pf->rstat, pf->accounted_rstat,
					       dir->first, CEPH_NOSNAP, this, true);
	  for (compact_map<snapid_t,old_rstat_t>::iterator q = dir->dirty_old_rstat.begin();
	       q != dir->dirty_old_rstat.end();
	       ++q)
	    mdcache->project_rstat_frag_to_inode(q->second.rstat, q->second.accounted_rstat,
						 q->second.first, q->first, this, true);
	  if (update)  // dir contents not valid if frozen or non-auth
	    dir->check_rstats();
	} else {
	  dout(20) << fg << " skipping STALE accounted_rstat " << pf->accounted_rstat << dendl;
	}
	if (update) {
	  pf->accounted_rstat = pf->rstat;
	  dir->dirty_old_rstat.clear();
	  pf->rstat.version = pf->accounted_rstat.version = pi->rstat.version;
	  dir->check_rstats();
	  dout(10) << fg << " updated accounted_rstat " << pf->rstat << " on " << *dir << dendl;
	}

	tmpdft.force_to_leaf(g_ceph_context, fg);
	rstat.add(pf->rstat);
      }
      dout(20) << " final rstat " << pi->rstat << dendl;

      // cross-check: inode rstat should equal the sum over all frags
      if (rstat_valid && !rstat.same_sums(pi->rstat)) {
	list<frag_t> ls;
	tmpdft.get_leaves_under(frag_t(), ls);
	for (list<frag_t>::iterator p = ls.begin(); p != ls.end(); ++p)
	  if (!dirfrags.count(*p)) {
	    rstat_valid = false;
	    break;
	  }
	if (rstat_valid) {
	  if (state_test(CInode::STATE_REPAIRSTATS)) {
	    dout(20) << " rstat mismatch, fixing" << dendl;
	  } else {
	    clog->error() << "unmatched rstat on " << ino() << ", inode has "
			  << pi->rstat << ", dirfrags have " << rstat;
	    assert(!"unmatched rstat" == g_conf->mds_verify_scatter);
	  }
	  // trust the dirfrag for now
	  version_t v = pi->rstat.version;
	  if (pi->rstat.rctime > rstat.rctime)
	    rstat.rctime = pi->rstat.rctime;
	  pi->rstat = rstat;
	  pi->rstat.version = v;
	}
      }

      mdcache->broadcast_quota_to_client(this);
    }
    break;

  case CEPH_LOCK_IDFT:
    break;                              // nothing to assimilate for dirfragtree

  default:
    ceph_abort();
  }
}
2243 | ||
2244 | void CInode::finish_scatter_gather_update_accounted(int type, MutationRef& mut, EMetaBlob *metablob) | |
2245 | { | |
2246 | dout(10) << "finish_scatter_gather_update_accounted " << type << " on " << *this << dendl; | |
2247 | assert(is_auth()); | |
2248 | ||
2249 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
2250 | p != dirfrags.end(); | |
2251 | ++p) { | |
2252 | CDir *dir = p->second; | |
2253 | if (!dir->is_auth() || dir->get_version() == 0 || dir->is_frozen()) | |
2254 | continue; | |
2255 | ||
2256 | if (type == CEPH_LOCK_IDFT) | |
2257 | continue; // nothing to do. | |
2258 | ||
2259 | dout(10) << " journaling updated frag accounted_ on " << *dir << dendl; | |
2260 | assert(dir->is_projected()); | |
2261 | fnode_t *pf = dir->get_projected_fnode(); | |
2262 | pf->version = dir->pre_dirty(); | |
2263 | mut->add_projected_fnode(dir); | |
2264 | metablob->add_dir(dir, true); | |
2265 | mut->auth_pin(dir); | |
2266 | ||
2267 | if (type == CEPH_LOCK_INEST) | |
2268 | dir->assimilate_dirty_rstat_inodes_finish(mut, metablob); | |
2269 | } | |
2270 | } | |
2271 | ||
2272 | // waiting | |
2273 | ||
2274 | bool CInode::is_frozen() const | |
2275 | { | |
2276 | if (is_frozen_inode()) return true; | |
2277 | if (parent && parent->dir->is_frozen()) return true; | |
2278 | return false; | |
2279 | } | |
2280 | ||
2281 | bool CInode::is_frozen_dir() const | |
2282 | { | |
2283 | if (parent && parent->dir->is_frozen_dir()) return true; | |
2284 | return false; | |
2285 | } | |
2286 | ||
2287 | bool CInode::is_freezing() const | |
2288 | { | |
2289 | if (is_freezing_inode()) return true; | |
2290 | if (parent && parent->dir->is_freezing()) return true; | |
2291 | return false; | |
2292 | } | |
2293 | ||
2294 | void CInode::add_dir_waiter(frag_t fg, MDSInternalContextBase *c) | |
2295 | { | |
2296 | if (waiting_on_dir.empty()) | |
2297 | get(PIN_DIRWAITER); | |
2298 | waiting_on_dir[fg].push_back(c); | |
2299 | dout(10) << "add_dir_waiter frag " << fg << " " << c << " on " << *this << dendl; | |
2300 | } | |
2301 | ||
2302 | void CInode::take_dir_waiting(frag_t fg, list<MDSInternalContextBase*>& ls) | |
2303 | { | |
2304 | if (waiting_on_dir.empty()) | |
2305 | return; | |
2306 | ||
2307 | compact_map<frag_t, list<MDSInternalContextBase*> >::iterator p = waiting_on_dir.find(fg); | |
2308 | if (p != waiting_on_dir.end()) { | |
2309 | dout(10) << "take_dir_waiting frag " << fg << " on " << *this << dendl; | |
2310 | ls.splice(ls.end(), p->second); | |
2311 | waiting_on_dir.erase(p); | |
2312 | ||
2313 | if (waiting_on_dir.empty()) | |
2314 | put(PIN_DIRWAITER); | |
2315 | } | |
2316 | } | |
2317 | ||
// Queue a waiter on this inode, or pass it up to the containing dirfrag.
// SINGLEAUTH/UNFREEZE waits belong here only if this inode *itself* is the
// ambiguous/freezing/frozen object; otherwise the condition being waited
// on lives further up the tree, so the parent dir takes the waiter.
void CInode::add_waiter(uint64_t tag, MDSInternalContextBase *c)
{
  dout(10) << "add_waiter tag " << std::hex << tag << std::dec << " " << c
	   << " !ambig " << !state_test(STATE_AMBIGUOUSAUTH)
	   << " !frozen " << !is_frozen_inode()
	   << " !freezing " << !is_freezing_inode()
	   << dendl;
  // wait on the directory?
  //  make sure its not the inode that is explicitly ambiguous|freezing|frozen
  if (((tag & WAIT_SINGLEAUTH) && !state_test(STATE_AMBIGUOUSAUTH)) ||
      ((tag & WAIT_UNFREEZE) &&
       !is_frozen_inode() && !is_freezing_inode() && !is_frozen_auth_pin())) {
    dout(15) << "passing waiter up tree" << dendl;
    parent->dir->add_waiter(tag, c);
    return;
  }
  dout(15) << "taking waiter here" << dendl;
  MDSCacheObject::add_waiter(tag, c);
}
2337 | ||
2338 | void CInode::take_waiting(uint64_t mask, list<MDSInternalContextBase*>& ls) | |
2339 | { | |
2340 | if ((mask & WAIT_DIR) && !waiting_on_dir.empty()) { | |
2341 | // take all dentry waiters | |
2342 | while (!waiting_on_dir.empty()) { | |
2343 | compact_map<frag_t, list<MDSInternalContextBase*> >::iterator p = waiting_on_dir.begin(); | |
2344 | dout(10) << "take_waiting dirfrag " << p->first << " on " << *this << dendl; | |
2345 | ls.splice(ls.end(), p->second); | |
2346 | waiting_on_dir.erase(p); | |
2347 | } | |
2348 | put(PIN_DIRWAITER); | |
2349 | } | |
2350 | ||
2351 | // waiting | |
2352 | MDSCacheObject::take_waiting(mask, ls); | |
2353 | } | |
2354 | ||
// Try to freeze this inode (block new auth pins).  'auth_pin_allowance'
// is the number of auth pins the caller itself holds and thus tolerates.
// Returns true if frozen immediately; false if we must wait for other
// pins to drain, in which case STATE_FREEZING is set and auth_unpin()
// completes the transition to FROZEN.
bool CInode::freeze_inode(int auth_pin_allowance)
{
  assert(auth_pin_allowance > 0);  // otherwise we need to adjust parent's nested_auth_pins
  assert(auth_pins >= auth_pin_allowance);
  if (auth_pins > auth_pin_allowance) {
    dout(10) << "freeze_inode - waiting for auth_pins to drop to " << auth_pin_allowance << dendl;
    auth_pin_freeze_allowance = auth_pin_allowance;
    get(PIN_FREEZING);
    state_set(STATE_FREEZING);
    return false;
  }

  dout(10) << "freeze_inode - frozen" << dendl;
  assert(auth_pins == auth_pin_allowance);
  if (!state_test(STATE_FROZEN)) {
    get(PIN_FROZEN);
    state_set(STATE_FROZEN);
  }
  return true;
}
2375 | ||
// Undo freeze_inode(): clear whichever of FREEZING/FROZEN is set (exactly
// one must be — anything else is a caller bug) and collect the contexts
// blocked on WAIT_UNFREEZE into 'finished'.
void CInode::unfreeze_inode(list<MDSInternalContextBase*>& finished)
{
  dout(10) << "unfreeze_inode" << dendl;
  if (state_test(STATE_FREEZING)) {
    state_clear(STATE_FREEZING);
    put(PIN_FREEZING);
  } else if (state_test(STATE_FROZEN)) {
    state_clear(STATE_FROZEN);
    put(PIN_FROZEN);
  } else
    ceph_abort();  // not freezing or frozen: caller bug
  take_waiting(WAIT_UNFREEZE, finished);
}
2389 | ||
2390 | void CInode::unfreeze_inode() | |
2391 | { | |
2392 | list<MDSInternalContextBase*> finished; | |
2393 | unfreeze_inode(finished); | |
2394 | mdcache->mds->queue_waiters(finished); | |
2395 | } | |
2396 | ||
// Latch the frozen state for auth-pinning: while FROZENAUTHPIN is set,
// can_auth_pin() keeps returning false even after the inode is thawed.
// Only legal on an already-frozen inode.
void CInode::freeze_auth_pin()
{
  assert(state_test(CInode::STATE_FROZEN));
  state_set(CInode::STATE_FROZENAUTHPIN);
}
2402 | ||
// Release the FROZENAUTHPIN latch; if the inode is no longer freezing or
// frozen, wake everyone waiting to auth-pin it.
void CInode::unfreeze_auth_pin()
{
  assert(state_test(CInode::STATE_FROZENAUTHPIN));
  state_clear(CInode::STATE_FROZENAUTHPIN);
  if (!state_test(STATE_FREEZING|STATE_FROZEN)) {
    list<MDSInternalContextBase*> finished;
    take_waiting(WAIT_UNFREEZE, finished);
    mdcache->mds->queue_waiters(finished);
  }
}
2413 | ||
// Authority is no longer ambiguous (e.g. an export finished); clear the
// flag and collect the waiters blocked on WAIT_SINGLEAUTH.
void CInode::clear_ambiguous_auth(list<MDSInternalContextBase*>& finished)
{
  assert(state_test(CInode::STATE_AMBIGUOUSAUTH));
  state_clear(CInode::STATE_AMBIGUOUSAUTH);
  take_waiting(CInode::WAIT_SINGLEAUTH, finished);
}
2420 | ||
2421 | void CInode::clear_ambiguous_auth() | |
2422 | { | |
2423 | list<MDSInternalContextBase*> finished; | |
2424 | clear_ambiguous_auth(finished); | |
2425 | mdcache->mds->queue_waiters(finished); | |
2426 | } | |
2427 | ||
2428 | // auth_pins | |
2429 | bool CInode::can_auth_pin() const { | |
2430 | if (!is_auth() || is_freezing_inode() || is_frozen_inode() || is_frozen_auth_pin()) | |
2431 | return false; | |
2432 | if (parent) | |
2433 | return parent->can_auth_pin(); | |
2434 | return true; | |
2435 | } | |
2436 | ||
// Take an auth pin on this inode (prevents freezing/migration while held).
// 'by' identifies the pinner for debugging.  The first pin takes a cache
// reference, and every pin is propagated up the tree as a nested pin.
void CInode::auth_pin(void *by)
{
  if (auth_pins == 0)
    get(PIN_AUTHPIN);
  auth_pins++;

#ifdef MDS_AUTHPIN_SET
  auth_pin_set.insert(by);
#endif

  dout(10) << "auth_pin by " << by << " on " << *this
	   << " now " << auth_pins << "+" << nested_auth_pins
	   << dendl;

  if (parent)
    parent->adjust_nested_auth_pins(1, 1, this);
}
2454 | ||
// Drop an auth pin taken with auth_pin().  If this inode was waiting to
// freeze and the count has drained to the caller-tolerated allowance,
// complete the FREEZING -> FROZEN transition and wake WAIT_FROZEN waiters.
void CInode::auth_unpin(void *by)
{
  auth_pins--;

#ifdef MDS_AUTHPIN_SET
  assert(auth_pin_set.count(by));
  auth_pin_set.erase(auth_pin_set.find(by));
#endif

  if (auth_pins == 0)
    put(PIN_AUTHPIN);

  dout(10) << "auth_unpin by " << by << " on " << *this
	   << " now " << auth_pins << "+" << nested_auth_pins
	   << dendl;

  assert(auth_pins >= 0);

  if (parent)
    parent->adjust_nested_auth_pins(-1, -1, by);

  // last blocking pin released while freezing: finish the freeze
  if (is_freezing_inode() &&
      auth_pins == auth_pin_freeze_allowance) {
    dout(10) << "auth_unpin freezing!" << dendl;
    get(PIN_FROZEN);
    put(PIN_FREEZING);
    state_clear(STATE_FREEZING);
    state_set(STATE_FROZEN);
    finish_waiting(WAIT_FROZEN);
  }
}
2486 | ||
// Propagate a nested auth-pin delta 'a' coming from a child dirfrag up
// through this inode to its parent dentry.  With mds_debug_auth_pins set,
// audit that the counter matches the number of non-subtree-root dirfrags
// that currently hold cumulative pins.
void CInode::adjust_nested_auth_pins(int a, void *by)
{
  assert(a);
  nested_auth_pins += a;
  dout(35) << "adjust_nested_auth_pins by " << by
	   << " change " << a << " yields "
	   << auth_pins << "+" << nested_auth_pins << dendl;
  assert(nested_auth_pins >= 0);

  if (g_conf->mds_debug_auth_pins) {
    // audit
    int s = 0;
    for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
	 p != dirfrags.end();
	 ++p) {
      CDir *dir = p->second;
      if (!dir->is_subtree_root() && dir->get_cum_auth_pins())
	s++;
    }
    assert(s == nested_auth_pins);
  }

  if (parent)
    parent->adjust_nested_auth_pins(a, 0, by);
}
2512 | ||
2513 | ||
2514 | // authority | |
2515 | ||
// Compute which MDS rank(s) are authoritative for this inode: an explicit
// inode_auth wins; otherwise inherit from the (projected) parent dentry's
// dirfrag; an unlinked inode with no parent at all has undefined auth.
mds_authority_t CInode::authority() const
{
  if (inode_auth.first >= 0)
    return inode_auth;

  if (parent)
    return parent->dir->authority();

  // new items that are not yet linked in (in the committed plane) belong
  // to their first parent.
  if (!projected_parent.empty())
    return projected_parent.front()->dir->authority();

  return CDIR_AUTH_UNDEF;
}
2531 | ||
2532 | ||
2533 | // SNAP | |
2534 | ||
2535 | snapid_t CInode::get_oldest_snap() | |
2536 | { | |
2537 | snapid_t t = first; | |
2538 | if (!old_inodes.empty()) | |
2539 | t = old_inodes.begin()->second.first; | |
2540 | return MIN(t, oldest_snap); | |
2541 | } | |
2542 | ||
// Copy-on-write the head inode (or the previous projected one when
// !cow_head) and its xattrs into old_inodes, covering [first, follows].
// Advances 'first' past 'follows', tracks the oldest covered snap, and
// remembers intervals whose rstat delta is not yet accounted.
old_inode_t& CInode::cow_old_inode(snapid_t follows, bool cow_head)
{
  assert(follows >= first);

  inode_t *pi = cow_head ? get_projected_inode() : get_previous_projected_inode();
  map<string,bufferptr> *px = cow_head ? get_projected_xattrs() : get_previous_projected_xattrs();

  // keyed by the *end* of the interval; old.first records its start
  old_inode_t &old = old_inodes[follows];
  old.first = first;
  old.inode = *pi;
  old.xattrs = *px;

  if (first < oldest_snap)
    oldest_snap = first;

  dout(10) << " " << px->size() << " xattrs cowed, " << *px << dendl;

  // client writeback ranges past 'follows' are irrelevant to the snapshot
  old.inode.trim_client_ranges(follows);

  if (g_conf->mds_snap_rstat &&
      !(old.inode.rstat == old.inode.accounted_rstat))
    dirty_old_rstats.insert(follows);

  first = follows+1;

  dout(10) << "cow_old_inode " << (cow_head ? "head" : "previous_head" )
	   << " to [" << old.first << "," << follows << "] on "
	   << *this << dendl;

  return old;
}
2574 | ||
// Split the old_inode interval that straddles 'snap' into
// [old.first, snap-1] and [snap, p->first].  Needed when a new snapshot
// boundary lands inside an existing cowed interval.
void CInode::split_old_inode(snapid_t snap)
{
  compact_map<snapid_t, old_inode_t>::iterator p = old_inodes.lower_bound(snap);
  assert(p != old_inodes.end() && p->second.first < snap);  // must straddle 'snap'

  // new entry for the left half, keyed by its new end (snap-1)
  old_inode_t &old = old_inodes[snap - 1];
  old = p->second;

  p->second.first = snap;  // right half now starts at 'snap'
  dout(10) << "split_old_inode " << "[" << old.first << "," << p->first
	   << "] to [" << snap << "," << p->first << "] on " << *this << dendl;
}
2587 | ||
2588 | void CInode::pre_cow_old_inode() | |
2589 | { | |
2590 | snapid_t follows = find_snaprealm()->get_newest_seq(); | |
2591 | if (first <= follows) | |
2592 | cow_old_inode(follows, true); | |
2593 | } | |
2594 | ||
// Drop old_inode entries whose interval [p->second.first, p->first] no
// longer intersects any live snapid in 'snaps'.
void CInode::purge_stale_snap_data(const set<snapid_t>& snaps)
{
  dout(10) << "purge_stale_snap_data " << snaps << dendl;

  if (old_inodes.empty())
    return;

  compact_map<snapid_t,old_inode_t>::iterator p = old_inodes.begin();
  while (p != old_inodes.end()) {
    // first live snap at or after the interval's start
    set<snapid_t>::const_iterator q = snaps.lower_bound(p->second.first);
    if (q == snaps.end() || *q > p->first) {
      // no live snapid falls inside this interval -> unreferenced
      dout(10) << " purging old_inode [" << p->second.first << "," << p->first << "]" << dendl;
      old_inodes.erase(p++);  // post-increment keeps the iterator valid
    } else
      ++p;
  }
}
2612 | ||
2613 | /* | |
2614 | * pick/create an old_inode | |
2615 | */ | |
2616 | old_inode_t * CInode::pick_old_inode(snapid_t snap) | |
2617 | { | |
2618 | compact_map<snapid_t, old_inode_t>::iterator p = old_inodes.lower_bound(snap); // p is first key >= to snap | |
2619 | if (p != old_inodes.end() && p->second.first <= snap) { | |
2620 | dout(10) << "pick_old_inode snap " << snap << " -> [" << p->second.first << "," << p->first << "]" << dendl; | |
2621 | return &p->second; | |
2622 | } | |
2623 | dout(10) << "pick_old_inode snap " << snap << " -> nothing" << dendl; | |
2624 | return NULL; | |
2625 | } | |
2626 | ||
// Instantiate this inode's SnapRealm (if not already open) and link it
// under the nearest ancestor realm.  'nosplit' skips splitting children
// out of the parent realm (used e.g. during replay/import paths).
void CInode::open_snaprealm(bool nosplit)
{
  if (!snaprealm) {
    SnapRealm *parent = find_snaprealm();
    snaprealm = new SnapRealm(mdcache, this);
    if (parent) {
      dout(10) << "open_snaprealm " << snaprealm
	       << " parent is " << parent
	       << dendl;
      dout(30) << " siblings are " << parent->open_children << dendl;
      snaprealm->parent = parent;
      if (!nosplit)
	parent->split_at(snaprealm);
      parent->open_children.insert(snaprealm);
    }
  }
}
// Tear down this inode's SnapRealm, detaching it from its parent realm.
// NOTE: 'nojoin' is currently unused — the join path below is commented
// out.
void CInode::close_snaprealm(bool nojoin)
{
  if (snaprealm) {
    dout(15) << "close_snaprealm " << *snaprealm << dendl;
    snaprealm->close_parents();
    if (snaprealm->parent) {
      snaprealm->parent->open_children.erase(snaprealm);
      //if (!nojoin)
      //snaprealm->parent->join(snaprealm);
    }
    delete snaprealm;
    snaprealm = 0;
  }
}
2658 | ||
2659 | SnapRealm *CInode::find_snaprealm() const | |
2660 | { | |
2661 | const CInode *cur = this; | |
2662 | while (!cur->snaprealm) { | |
2663 | if (cur->get_parent_dn()) | |
2664 | cur = cur->get_parent_dn()->get_dir()->get_inode(); | |
2665 | else if (get_projected_parent_dn()) | |
2666 | cur = cur->get_projected_parent_dn()->get_dir()->get_inode(); | |
2667 | else | |
2668 | break; | |
2669 | } | |
2670 | return cur->snaprealm; | |
2671 | } | |
2672 | ||
2673 | void CInode::encode_snap_blob(bufferlist &snapbl) | |
2674 | { | |
2675 | if (snaprealm) { | |
2676 | ::encode(snaprealm->srnode, snapbl); | |
2677 | dout(20) << "encode_snap_blob " << *snaprealm << dendl; | |
2678 | } | |
2679 | } | |
// Inverse of encode_snap_blob(): a non-empty blob means this inode has a
// realm, so open one (if needed) and decode srnode into it.  Base inodes
// (root/mdsdir) must be able to open their full parent chain immediately.
void CInode::decode_snap_blob(bufferlist& snapbl)
{
  if (snapbl.length()) {
    open_snaprealm();
    bufferlist::iterator p = snapbl.begin();
    ::decode(snaprealm->srnode, p);
    if (is_base()) {
      bool ok = snaprealm->_open_parents(NULL);
      assert(ok);
    }
    dout(20) << "decode_snap_blob " << *snaprealm << dendl;
  }
}
2693 | ||
2694 | void CInode::encode_snap(bufferlist& bl) | |
2695 | { | |
2696 | bufferlist snapbl; | |
2697 | encode_snap_blob(snapbl); | |
2698 | ::encode(snapbl, bl); | |
2699 | ::encode(oldest_snap, bl); | |
2700 | } | |
2701 | ||
// Counterpart of encode_snap(): decode the snap blob and oldest_snap,
// then materialize the snaprealm from the blob.
void CInode::decode_snap(bufferlist::iterator& p)
{
  bufferlist snapbl;
  ::decode(snapbl, p);
  ::decode(oldest_snap, p);
  decode_snap_blob(snapbl);
}
2709 | ||
2710 | // ============================================= | |
2711 | ||
2712 | client_t CInode::calc_ideal_loner() | |
2713 | { | |
2714 | if (mdcache->is_readonly()) | |
2715 | return -1; | |
2716 | if (!mds_caps_wanted.empty()) | |
2717 | return -1; | |
2718 | ||
2719 | int n = 0; | |
2720 | client_t loner = -1; | |
2721 | for (map<client_t,Capability*>::iterator it = client_caps.begin(); | |
2722 | it != client_caps.end(); | |
2723 | ++it) | |
2724 | if (!it->second->is_stale() && | |
2725 | ((it->second->wanted() & (CEPH_CAP_ANY_WR|CEPH_CAP_FILE_WR|CEPH_CAP_FILE_RD)) || | |
2726 | (inode.is_dir() && !has_subtree_root_dirfrag()))) { | |
2727 | if (n) | |
2728 | return -1; | |
2729 | n++; | |
2730 | loner = it->first; | |
2731 | } | |
2732 | return loner; | |
2733 | } | |
2734 | ||
2735 | client_t CInode::choose_ideal_loner() | |
2736 | { | |
2737 | want_loner_cap = calc_ideal_loner(); | |
2738 | return want_loner_cap; | |
2739 | } | |
2740 | ||
2741 | bool CInode::try_set_loner() | |
2742 | { | |
2743 | assert(want_loner_cap >= 0); | |
2744 | if (loner_cap >= 0 && loner_cap != want_loner_cap) | |
2745 | return false; | |
2746 | set_loner_cap(want_loner_cap); | |
2747 | return true; | |
2748 | } | |
2749 | ||
// Record @l as the loner client and propagate it to every cap-bearing
// lock that can grant exclusive caps to the loner.
void CInode::set_loner_cap(client_t l)
{
  loner_cap = l;
  authlock.set_excl_client(loner_cap);
  filelock.set_excl_client(loner_cap);
  linklock.set_excl_client(loner_cap);
  xattrlock.set_excl_client(loner_cap);
}
2758 | ||
2759 | bool CInode::try_drop_loner() | |
2760 | { | |
2761 | if (loner_cap < 0) | |
2762 | return true; | |
2763 | ||
2764 | int other_allowed = get_caps_allowed_by_type(CAP_ANY); | |
2765 | Capability *cap = get_client_cap(loner_cap); | |
2766 | if (!cap || | |
2767 | (cap->issued() & ~other_allowed) == 0) { | |
2768 | set_loner_cap(-1); | |
2769 | return true; | |
2770 | } | |
2771 | return false; | |
2772 | } | |
2773 | ||
2774 | ||
// choose new lock state during recovery, based on issued caps
// Derive a stable lock state from the caps actually issued for @lock's
// cap category (extracted from @allissued via the lock's shift/mask).
// Only the auth MDS picks states here; replicas already chose theirs
// during rejoin.
void CInode::choose_lock_state(SimpleLock *lock, int allissued)
{
  int shift = lock->get_cap_shift();
  int issued = (allissued >> shift) & lock->get_cap_mask();
  if (is_auth()) {
    if (lock->is_xlocked()) {
      // do nothing here
    } else if (lock->get_state() != LOCK_MIX) {
      // EXCL if a client holds exclusive/buffered caps; MIX for shared
      // write; otherwise fall back based on dirtiness/replication.
      if (issued & (CEPH_CAP_GEXCL | CEPH_CAP_GBUFFER))
	lock->set_state(LOCK_EXCL);
      else if (issued & CEPH_CAP_GWR)
	lock->set_state(LOCK_MIX);
      else if (lock->is_dirty()) {
	if (is_replicated())
	  lock->set_state(LOCK_MIX);
	else
	  lock->set_state(LOCK_LOCK);
      } else
	lock->set_state(LOCK_SYNC);
    }
  } else {
    // our states have already been chosen during rejoin.
    if (lock->is_xlocked())
      assert(lock->get_state() == LOCK_LOCK);
  }
}
2802 | ||
// Recovery helper: pick states for all cap-related locks based on the
// union of currently-issued caps and @dirty_caps, establishing a loner
// first if exclusive/write caps suggest one.
void CInode::choose_lock_states(int dirty_caps)
{
  int issued = get_caps_issued() | dirty_caps;
  if (is_auth() && (issued & (CEPH_CAP_ANY_EXCL|CEPH_CAP_ANY_WR)) &&
      choose_ideal_loner() >= 0)
    try_set_loner();
  choose_lock_state(&filelock, issued);
  choose_lock_state(&nestlock, issued);
  choose_lock_state(&dirfragtreelock, issued);
  choose_lock_state(&authlock, issued);
  choose_lock_state(&xattrlock, issued);
  choose_lock_state(&linklock, issued);
}
2816 | ||
2817 | Capability *CInode::add_client_cap(client_t client, Session *session, SnapRealm *conrealm) | |
2818 | { | |
2819 | if (client_caps.empty()) { | |
2820 | get(PIN_CAPS); | |
2821 | if (conrealm) | |
2822 | containing_realm = conrealm; | |
2823 | else | |
2824 | containing_realm = find_snaprealm(); | |
2825 | containing_realm->inodes_with_caps.push_back(&item_caps); | |
2826 | dout(10) << "add_client_cap first cap, joining realm " << *containing_realm << dendl; | |
2827 | } | |
2828 | ||
2829 | if (client_caps.empty()) | |
2830 | mdcache->num_inodes_with_caps++; | |
2831 | ||
2832 | Capability *cap = new Capability(this, ++mdcache->last_cap_id, client); | |
2833 | assert(client_caps.count(client) == 0); | |
2834 | client_caps[client] = cap; | |
2835 | ||
2836 | session->add_cap(cap); | |
2837 | if (session->is_stale()) | |
2838 | cap->mark_stale(); | |
2839 | ||
2840 | cap->client_follows = first-1; | |
2841 | ||
2842 | containing_realm->add_cap(client, cap); | |
2843 | ||
2844 | return cap; | |
2845 | } | |
2846 | ||
// Remove and destroy @client's capability on this inode, unhooking it
// from session/realm/revocation lists.  Dropping the last cap unpins
// the inode and detaches it from its realm.  Also releases any advisory
// locks the client held and wakes flock waiters.
void CInode::remove_client_cap(client_t client)
{
  assert(client_caps.count(client) == 1);
  Capability *cap = client_caps[client];

  // unhook from all intrusive lists before freeing
  cap->item_session_caps.remove_myself();
  cap->item_revoking_caps.remove_myself();
  cap->item_client_revoking_caps.remove_myself();
  containing_realm->remove_cap(client, cap);

  if (client == loner_cap)
    loner_cap = -1;

  delete cap;
  client_caps.erase(client);
  if (client_caps.empty()) {
    dout(10) << "remove_client_cap last cap, leaving realm " << *containing_realm << dendl;
    put(PIN_CAPS);
    item_caps.remove_myself();
    containing_realm = NULL;
    item_open_file.remove_myself();  // unpin logsegment
    mdcache->num_inodes_with_caps--;
  }

  //clean up advisory locks
  bool fcntl_removed = fcntl_locks ? fcntl_locks->remove_all_from(client) : false;
  bool flock_removed = flock_locks ? flock_locks->remove_all_from(client) : false;
  if (fcntl_removed || flock_removed) {
    list<MDSInternalContextBase*> waiters;
    take_waiting(CInode::WAIT_FLOCK, waiters);
    mdcache->mds->queue_waiters(waiters);
  }
}
2880 | ||
2881 | void CInode::move_to_realm(SnapRealm *realm) | |
2882 | { | |
2883 | dout(10) << "move_to_realm joining realm " << *realm | |
2884 | << ", leaving realm " << *containing_realm << dendl; | |
2885 | for (map<client_t,Capability*>::iterator q = client_caps.begin(); | |
2886 | q != client_caps.end(); | |
2887 | ++q) { | |
2888 | containing_realm->remove_cap(q->first, q->second); | |
2889 | realm->add_cap(q->first, q->second); | |
2890 | } | |
2891 | item_caps.remove_myself(); | |
2892 | realm->inodes_with_caps.push_back(&item_caps); | |
2893 | containing_realm = realm; | |
2894 | } | |
2895 | ||
// Re-establish @client's cap from its reconnect record @icr during MDS
// recovery: merge into an existing cap, or create a fresh one seeded
// with the client's reported cap_id/wanted/issued state.
Capability *CInode::reconnect_cap(client_t client, const cap_reconnect_t& icr, Session *session)
{
  Capability *cap = get_client_cap(client);
  if (cap) {
    // FIXME?
    cap->merge(icr.capinfo.wanted, icr.capinfo.issued);
  } else {
    cap = add_client_cap(client, session);
    cap->set_cap_id(icr.capinfo.cap_id);
    cap->set_wanted(icr.capinfo.wanted);
    cap->issue_norevoke(icr.capinfo.issued);
    cap->reset_seq();
  }
  cap->set_last_issue_stamp(ceph_clock_now());
  return cap;
}
2912 | ||
2913 | void CInode::clear_client_caps_after_export() | |
2914 | { | |
2915 | while (!client_caps.empty()) | |
2916 | remove_client_cap(client_caps.begin()->first); | |
2917 | loner_cap = -1; | |
2918 | want_loner_cap = -1; | |
2919 | mds_caps_wanted.clear(); | |
2920 | } | |
2921 | ||
2922 | void CInode::export_client_caps(map<client_t,Capability::Export>& cl) | |
2923 | { | |
2924 | for (map<client_t,Capability*>::iterator it = client_caps.begin(); | |
2925 | it != client_caps.end(); | |
2926 | ++it) { | |
2927 | cl[it->first] = it->second->make_export(); | |
2928 | } | |
2929 | } | |
2930 | ||
2931 | // caps allowed | |
2932 | int CInode::get_caps_liked() const | |
2933 | { | |
2934 | if (is_dir()) | |
2935 | return CEPH_CAP_PIN | CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_SHARED; // but not, say, FILE_RD|WR|WRBUFFER | |
2936 | else | |
2937 | return CEPH_CAP_ANY & ~CEPH_CAP_FILE_LAZYIO; | |
2938 | } | |
2939 | ||
2940 | int CInode::get_caps_allowed_ever() const | |
2941 | { | |
2942 | int allowed; | |
2943 | if (is_dir()) | |
2944 | allowed = CEPH_CAP_PIN | CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_SHARED; | |
2945 | else | |
2946 | allowed = CEPH_CAP_ANY; | |
2947 | return allowed & | |
2948 | (CEPH_CAP_PIN | | |
2949 | (filelock.gcaps_allowed_ever() << filelock.get_cap_shift()) | | |
2950 | (authlock.gcaps_allowed_ever() << authlock.get_cap_shift()) | | |
2951 | (xattrlock.gcaps_allowed_ever() << xattrlock.get_cap_shift()) | | |
2952 | (linklock.gcaps_allowed_ever() << linklock.get_cap_shift())); | |
2953 | } | |
2954 | ||
// Caps currently allowed for requester class @type (CAP_LONER,
// CAP_XLOCKER or CAP_ANY), assembled from each lock's per-category
// allowance shifted into its cap bit range.  PIN is always allowed.
int CInode::get_caps_allowed_by_type(int type) const
{
  return
    CEPH_CAP_PIN |
    (filelock.gcaps_allowed(type) << filelock.get_cap_shift()) |
    (authlock.gcaps_allowed(type) << authlock.get_cap_shift()) |
    (xattrlock.gcaps_allowed(type) << xattrlock.get_cap_shift()) |
    (linklock.gcaps_allowed(type) << linklock.get_cap_shift());
}
2964 | ||
// Union of each lock's "careful" cap bits, shifted into place.
// (Semantics of the careful mask live in the lock classes.)
int CInode::get_caps_careful() const
{
  return
    (filelock.gcaps_careful() << filelock.get_cap_shift()) |
    (authlock.gcaps_careful() << authlock.get_cap_shift()) |
    (xattrlock.gcaps_careful() << xattrlock.get_cap_shift()) |
    (linklock.gcaps_careful() << linklock.get_cap_shift());
}
2973 | ||
// Mask of cap bits that @client may hold by virtue of xlocking the
// corresponding locks, assembled from each lock's xlocker mask.
int CInode::get_xlocker_mask(client_t client) const
{
  return
    (filelock.gcaps_xlocker_mask(client) << filelock.get_cap_shift()) |
    (authlock.gcaps_xlocker_mask(client) << authlock.get_cap_shift()) |
    (xattrlock.gcaps_xlocker_mask(client) << xattrlock.get_cap_shift()) |
    (linklock.gcaps_xlocker_mask(client) << linklock.get_cap_shift());
}
2982 | ||
// Caps allowed for the specific client behind @session: the loner gets
// the loner allowance plus xlocker caps for locks it holds; everyone
// else gets the generic allowance.  File RD/WR are masked off when the
// client lacks the feature bits needed for inline data or namespaced
// file layouts present on this inode.
int CInode::get_caps_allowed_for_client(Session *session, inode_t *file_i) const
{
  client_t client = session->info.inst.name.num();
  int allowed;
  if (client == get_loner()) {
    // as the loner, we get the loner_caps AND any xlocker_caps for things we have xlocked
    allowed =
      get_caps_allowed_by_type(CAP_LONER) |
      (get_caps_allowed_by_type(CAP_XLOCKER) & get_xlocker_mask(client));
  } else {
    allowed = get_caps_allowed_by_type(CAP_ANY);
  }

  if (!is_dir()) {
    if ((file_i->inline_data.version != CEPH_INLINE_NONE &&
	 !session->connection->has_feature(CEPH_FEATURE_MDS_INLINE_DATA)) ||
	(!file_i->layout.pool_ns.empty() &&
	 !session->connection->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2)))
      allowed &= ~(CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR);
  }
  return allowed;
}
3005 | ||
// caps issued, wanted
// Union of caps issued across all clients, restricted to bit window
// (@shift, @mask).  Optionally splits the total into loner / non-loner
// / xlocker portions via the out-params.  Note the side effect: a
// non-auth replica clears loner_cap here.
int CInode::get_caps_issued(int *ploner, int *pother, int *pxlocker,
			    int shift, int mask)
{
  int c = 0;
  int loner = 0, other = 0, xlocker = 0;
  if (!is_auth()) {
    // only the auth MDS tracks a loner
    loner_cap = -1;
  }

  for (map<client_t,Capability*>::const_iterator it = client_caps.begin();
       it != client_caps.end();
       ++it) {
    int i = it->second->issued();
    c |= i;
    if (it->first == loner_cap)
      loner |= i;
    else
      other |= i;
    xlocker |= get_xlocker_mask(it->first) & i;
  }
  if (ploner) *ploner = (loner >> shift) & mask;
  if (pother) *pother = (other >> shift) & mask;
  if (pxlocker) *pxlocker = (xlocker >> shift) & mask;
  return (c >> shift) & mask;
}
3032 | ||
3033 | bool CInode::is_any_caps_wanted() const | |
3034 | { | |
3035 | for (map<client_t,Capability*>::const_iterator it = client_caps.begin(); | |
3036 | it != client_caps.end(); | |
3037 | ++it) | |
3038 | if (it->second->wanted()) | |
3039 | return true; | |
3040 | return false; | |
3041 | } | |
3042 | ||
// Union of caps wanted by non-stale client caps (and, on the auth MDS,
// by other MDS ranks), restricted to bit window (@shift, @mask).
// Optionally splits into loner / non-loner portions; MDS-wanted caps
// count as "other".
int CInode::get_caps_wanted(int *ploner, int *pother, int shift, int mask) const
{
  int w = 0;
  int loner = 0, other = 0;
  for (map<client_t,Capability*>::const_iterator it = client_caps.begin();
       it != client_caps.end();
       ++it) {
    if (!it->second->is_stale()) {
      int t = it->second->wanted();
      w |= t;
      if (it->first == loner_cap)
	loner |= t;
      else
	other |= t;
    }
    //cout << " get_caps_wanted client " << it->first << " " << cap_string(it->second.wanted()) << endl;
  }
  if (is_auth())
    for (compact_map<int,int>::const_iterator it = mds_caps_wanted.begin();
	 it != mds_caps_wanted.end();
	 ++it) {
      w |= it->second;
      other |= it->second;
      //cout << " get_caps_wanted mds " << it->first << " " << cap_string(it->second) << endl;
    }
  if (ploner) *ploner = (loner >> shift) & mask;
  if (pother) *pother = (other >> shift) & mask;
  return (w >> shift) & mask;
}
3072 | ||
3073 | bool CInode::issued_caps_need_gather(SimpleLock *lock) | |
3074 | { | |
3075 | int loner_issued, other_issued, xlocker_issued; | |
3076 | get_caps_issued(&loner_issued, &other_issued, &xlocker_issued, | |
3077 | lock->get_cap_shift(), lock->get_cap_mask()); | |
3078 | if ((loner_issued & ~lock->gcaps_allowed(CAP_LONER)) || | |
3079 | (other_issued & ~lock->gcaps_allowed(CAP_ANY)) || | |
3080 | (xlocker_issued & ~lock->gcaps_allowed(CAP_XLOCKER))) | |
3081 | return true; | |
3082 | return false; | |
3083 | } | |
3084 | ||
// Before replicating this (currently unreplicated, auth) inode, relax
// every lock to a replica-friendly state.
void CInode::replicate_relax_locks()
{
  //dout(10) << " relaxing locks on " << *this << dendl;
  assert(is_auth());
  assert(!is_replicated());

  authlock.replicate_relax();
  linklock.replicate_relax();
  dirfragtreelock.replicate_relax();
  filelock.replicate_relax();
  xattrlock.replicate_relax();
  snaplock.replicate_relax();
  nestlock.replicate_relax();
  flocklock.replicate_relax();
  policylock.replicate_relax();
}
3101 | ||
3102 | ||
3103 | ||
3104 | // ============================================= | |
3105 | ||
// Encode this inode's stat + cap grant into @bl for a client reply.
// The wire layout must match MClientReply::InodeStat on the client.
//
//  @session     target client session (determines features and loner)
//  @dir_realm   snaprealm of the containing dir, or NULL; a mismatch
//               with this inode's realm suppresses cap issuance
//  @snapid      CEPH_NOSNAP for the live inode, else a snapshot view
//  @max_bytes   if non-zero, fail with -ENOSPC when the estimated
//               encoding would exceed this budget
//  @getattr_caps caps the client asked for in a getattr, used to force
//               inclusion of inline data / xattrs
//
// Returns `valid` (as int): false only for non-auth snapshot views.
// Side effects: may create a new Capability for the client and adjust
// loner state; updates the cap's issue stamp/seq and its cached
// inline/xattr versions.
int CInode::encode_inodestat(bufferlist& bl, Session *session,
			     SnapRealm *dir_realm,
			     snapid_t snapid,
			     unsigned max_bytes,
			     int getattr_caps)
{
  client_t client = session->info.inst.name.num();

  assert(snapid);
  assert(session->connection);

  bool valid = true;

  // pick a version!
  inode_t *oi = &inode;
  inode_t *pi = get_projected_inode();

  map<string, bufferptr> *pxattrs = 0;

  if (snapid != CEPH_NOSNAP) {

    // for now at least, old_inodes is only defined/valid on the auth
    if (!is_auth())
      valid = false;

    if (is_multiversion()) {
      // locate the old_inode interval covering @snapid
      compact_map<snapid_t,old_inode_t>::iterator p = old_inodes.lower_bound(snapid);
      if (p != old_inodes.end()) {
	if (p->second.first > snapid) {
	  if  (p != old_inodes.begin())
	    --p;
	}
	if (p->second.first <= snapid && snapid <= p->first) {
	  dout(15) << "encode_inodestat snapid " << snapid
		   << " to old_inode [" << p->second.first << "," << p->first << "]"
		   << " " << p->second.inode.rstat
		   << dendl;
	  pi = oi = &p->second.inode;
	  pxattrs = &p->second.xattrs;
	} else {
	  // snapshoted remote dentry can result this
	  dout(0) << "encode_inodestat old_inode for snapid " << snapid
		  << " not found" << dendl;
	}
      }
    } else if (snapid < first || snapid > last) {
      // snapshoted remote dentry can result this
      dout(0) << "encode_inodestat [" << first << "," << last << "]"
	      << " not match snapid " << snapid << dendl;
    }
  }

  SnapRealm *realm = find_snaprealm();

  // decide whether any caps may be issued with this reply
  bool no_caps = !valid ||
		 session->is_stale() ||
		 (dir_realm && realm != dir_realm) ||
		 is_frozen() ||
		 state_test(CInode::STATE_EXPORTINGCAPS);
  if (no_caps)
    dout(20) << "encode_inodestat no caps"
	     << (!valid?", !valid":"")
	     << (session->is_stale()?", session stale ":"")
	     << ((dir_realm && realm != dir_realm)?", snaprealm differs ":"")
	     << (is_frozen()?", frozen inode":"")
	     << (state_test(CInode::STATE_EXPORTINGCAPS)?", exporting caps":"")
	     << dendl;


  // "fake" a version that is old (stable) version, +1 if projected.
  version_t version = (oi->version * 2) + is_projected();

  // which fields does this client see projected?  (xlock holder or loner)
  Capability *cap = get_client_cap(client);
  bool pfile = filelock.is_xlocked_by_client(client) || get_loner() == client;
  //(cap && (cap->issued() & CEPH_CAP_FILE_EXCL));
  bool pauth = authlock.is_xlocked_by_client(client) || get_loner() == client;
  bool plink = linklock.is_xlocked_by_client(client) || get_loner() == client;
  bool pxattr = xattrlock.is_xlocked_by_client(client) || get_loner() == client;

  bool plocal = versionlock.get_last_wrlock_client() == client;
  bool ppolicy = policylock.is_xlocked_by_client(client) || get_loner()==client;

  inode_t *any_i = (pfile|pauth|plink|pxattr|plocal) ? pi : oi;

  dout(20) << " pfile " << pfile << " pauth " << pauth
	   << " plink " << plink << " pxattr " << pxattr
	   << " plocal " << plocal
	   << " ctime " << any_i->ctime
	   << " valid=" << valid << dendl;

  // file
  inode_t *file_i = pfile ? pi:oi;
  file_layout_t layout;
  if (is_dir()) {
    layout = (ppolicy ? pi : oi)->layout;
  } else {
    layout = file_i->layout;
  }

  // max_size is min of projected, actual
  uint64_t max_size =
    MIN(oi->client_ranges.count(client) ?
	oi->client_ranges[client].range.last : 0,
	pi->client_ranges.count(client) ?
	pi->client_ranges[client].range.last : 0);

  // inline data
  version_t inline_version = 0;
  bufferlist inline_data;
  if (file_i->inline_data.version == CEPH_INLINE_NONE) {
    inline_version = CEPH_INLINE_NONE;
  } else if ((!cap && !no_caps) ||
	     (cap && cap->client_inline_version < file_i->inline_data.version) ||
	     (getattr_caps & CEPH_CAP_FILE_RD)) { // client requests inline data
    inline_version = file_i->inline_data.version;
    if (file_i->inline_data.length() > 0)
      inline_data = file_i->inline_data.get_data();
  }

  // nest (do same as file... :/)
  if (cap) {
    cap->last_rbytes = file_i->rstat.rbytes;
    cap->last_rsize = file_i->rstat.rsize();
  }

  // auth
  inode_t *auth_i = pauth ? pi:oi;

  // link
  inode_t *link_i = plink ? pi:oi;

  // xattr
  inode_t *xattr_i = pxattr ? pi:oi;

  // xattr
  bufferlist xbl;
  version_t xattr_version;
  if ((!cap && !no_caps) ||
      (cap && cap->client_xattr_version < xattr_i->xattr_version) ||
      (getattr_caps & CEPH_CAP_XATTR_SHARED)) { // client requests xattrs
    if (!pxattrs)
      pxattrs = pxattr ? get_projected_xattrs() : &xattrs;
    ::encode(*pxattrs, xbl);
    xattr_version = xattr_i->xattr_version;
  } else {
    xattr_version = 0;
  }

  // do we have room?
  if (max_bytes) {
    // conservative estimate of the encoded size (fixed fields + the
    // variable-length blobs computed above)
    unsigned bytes = 8 + 8 + 4 + 8 + 8 + sizeof(ceph_mds_reply_cap) +
      sizeof(struct ceph_file_layout) + 4 + layout.pool_ns.size() +
      sizeof(struct ceph_timespec) * 3 +
      4 + 8 + 8 + 8 + 4 + 4 + 4 + 4 + 4 +
      8 + 8 + 8 + 8 + 8 + sizeof(struct ceph_timespec) +
      4;
    bytes += sizeof(__u32);
    bytes += (sizeof(__u32) + sizeof(__u32)) * dirfragtree._splits.size();
    bytes += sizeof(__u32) + symlink.length();
    bytes += sizeof(__u32) + xbl.length();
    bytes += sizeof(version_t) + sizeof(__u32) + inline_data.length();
    if (bytes > max_bytes)
      return -ENOSPC;
  }


  // encode caps
  struct ceph_mds_reply_cap ecap;
  if (snapid != CEPH_NOSNAP) {
    /*
     * snapped inodes (files or dirs) only get read-only caps.  always
     * issue everything possible, since it is read only.
     *
     * if a snapped inode has caps, limit issued caps based on the
     * lock state.
     *
     * if it is a live inode, limit issued caps based on the lock
     * state.
     *
     * do NOT adjust cap issued state, because the client always
     * tracks caps per-snap and the mds does either per-interval or
     * multiversion.
     */
    ecap.caps = valid ? get_caps_allowed_by_type(CAP_ANY) : CEPH_STAT_CAP_INODE;
    if (last == CEPH_NOSNAP || is_any_caps())
      ecap.caps = ecap.caps & get_caps_allowed_for_client(session, file_i);
    ecap.seq = 0;
    ecap.mseq = 0;
    ecap.realm = 0;
  } else {
    if (!no_caps && !cap) {
      // add a new cap
      cap = add_client_cap(client, session, realm);
      if (is_auth()) {
	if (choose_ideal_loner() >= 0)
	  try_set_loner();
	else if (get_wanted_loner() < 0)
	  try_drop_loner();
      }
    }

    int issue = 0;
    if (!no_caps && cap) {
      int likes = get_caps_liked();
      int allowed = get_caps_allowed_for_client(session, file_i);
      issue = (cap->wanted() | likes) & allowed;
      cap->issue_norevoke(issue);
      issue = cap->pending();
      dout(10) << "encode_inodestat issuing " << ccap_string(issue)
	       << " seq " << cap->get_last_seq() << dendl;
    } else if (cap && cap->is_new() && !dir_realm) {
      // alway issue new caps to client, otherwise the caps get lost
      assert(cap->is_stale());
      issue = cap->pending() | CEPH_CAP_PIN;
      cap->issue_norevoke(issue);
      dout(10) << "encode_inodestat issuing " << ccap_string(issue)
	       << " seq " << cap->get_last_seq()
	       << "(stale|new caps)" << dendl;
    }

    if (issue) {
      cap->set_last_issue();
      cap->set_last_issue_stamp(ceph_clock_now());
      cap->clear_new();
      ecap.caps = issue;
      ecap.wanted = cap->wanted();
      ecap.cap_id = cap->get_cap_id();
      ecap.seq = cap->get_last_seq();
      ecap.mseq = cap->get_mseq();
      ecap.realm = realm->inode->ino();
    } else {
      ecap.cap_id = 0;
      ecap.caps = 0;
      ecap.seq = 0;
      ecap.mseq = 0;
      ecap.realm = 0;
      ecap.wanted = 0;
    }
  }
  ecap.flags = is_auth() ? CEPH_CAP_FLAG_AUTH : 0;
  dout(10) << "encode_inodestat caps " << ccap_string(ecap.caps)
	   << " seq " << ecap.seq << " mseq " << ecap.mseq
	   << " xattrv " << xattr_version << " len " << xbl.length()
	   << dendl;

  // only ship inline data if the client will actually track it
  if (inline_data.length() && cap) {
    if ((cap->pending() | getattr_caps) & CEPH_CAP_FILE_SHARED) {
      dout(10) << "including inline version " << inline_version << dendl;
      cap->client_inline_version = inline_version;
    } else {
      dout(10) << "dropping inline version " << inline_version << dendl;
      inline_version = 0;
      inline_data.clear();
    }
  }

  // include those xattrs?
  if (xbl.length() && cap) {
    if ((cap->pending() | getattr_caps) & CEPH_CAP_XATTR_SHARED) {
      dout(10) << "including xattrs version " << xattr_i->xattr_version << dendl;
      cap->client_xattr_version = xattr_i->xattr_version;
    } else {
      dout(10) << "dropping xattrs version " << xattr_i->xattr_version << dendl;
      xbl.clear(); // no xattrs .. XXX what's this about?!?
      xattr_version = 0;
    }
  }

  /*
   * note: encoding matches MClientReply::InodeStat
   */
  ::encode(oi->ino, bl);
  ::encode(snapid, bl);
  ::encode(oi->rdev, bl);
  ::encode(version, bl);

  ::encode(xattr_version, bl);

  ::encode(ecap, bl);
  {
    ceph_file_layout legacy_layout;
    layout.to_legacy(&legacy_layout);
    ::encode(legacy_layout, bl);
  }
  ::encode(any_i->ctime, bl);
  ::encode(file_i->mtime, bl);
  ::encode(file_i->atime, bl);
  ::encode(file_i->time_warp_seq, bl);
  ::encode(file_i->size, bl);
  ::encode(max_size, bl);
  ::encode(file_i->truncate_size, bl);
  ::encode(file_i->truncate_seq, bl);

  ::encode(auth_i->mode, bl);
  ::encode((uint32_t)auth_i->uid, bl);
  ::encode((uint32_t)auth_i->gid, bl);

  ::encode(link_i->nlink, bl);

  ::encode(file_i->dirstat.nfiles, bl);
  ::encode(file_i->dirstat.nsubdirs, bl);
  ::encode(file_i->rstat.rbytes, bl);
  ::encode(file_i->rstat.rfiles, bl);
  ::encode(file_i->rstat.rsubdirs, bl);
  ::encode(file_i->rstat.rctime, bl);

  dirfragtree.encode(bl);

  ::encode(symlink, bl);
  // feature-gated tail fields; order matters on the wire
  if (session->connection->has_feature(CEPH_FEATURE_DIRLAYOUTHASH)) {
    ::encode(file_i->dir_layout, bl);
  }
  ::encode(xbl, bl);
  if (session->connection->has_feature(CEPH_FEATURE_MDS_INLINE_DATA)) {
    ::encode(inline_version, bl);
    ::encode(inline_data, bl);
  }
  if (session->connection->has_feature(CEPH_FEATURE_MDS_QUOTA)) {
    inode_t *policy_i = ppolicy ? pi : oi;
    ::encode(policy_i->quota, bl);
  }
  if (session->connection->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2)) {
    ::encode(layout.pool_ns, bl);
  }
  if (session->connection->has_feature(CEPH_FEATURE_FS_BTIME)) {
    ::encode(any_i->btime, bl);
    ::encode(any_i->change_attr, bl);
  }

  return valid;
}
3436 | ||
// Fill an MClientCaps message @m for @cap's client, choosing projected
// vs stable inode fields per lock category (projected when the client
// xlocks the lock or holds FILE_EXCL for file fields).  Also attaches
// inline data and xattrs when the client's cached versions are stale.
void CInode::encode_cap_message(MClientCaps *m, Capability *cap)
{
  assert(cap);

  client_t client = cap->get_client();

  bool pfile = filelock.is_xlocked_by_client(client) || (cap->issued() & CEPH_CAP_FILE_EXCL);
  bool pauth = authlock.is_xlocked_by_client(client);
  bool plink = linklock.is_xlocked_by_client(client);
  bool pxattr = xattrlock.is_xlocked_by_client(client);

  inode_t *oi = &inode;
  inode_t *pi = get_projected_inode();
  inode_t *i = (pfile|pauth|plink|pxattr) ? pi : oi;

  dout(20) << "encode_cap_message pfile " << pfile
	   << " pauth " << pauth << " plink " << plink << " pxattr " << pxattr
	   << " ctime " << i->ctime << dendl;

  // file fields
  i = pfile ? pi:oi;
  m->set_layout(i->layout);
  m->size = i->size;
  m->truncate_seq = i->truncate_seq;
  m->truncate_size = i->truncate_size;
  m->mtime = i->mtime;
  m->atime = i->atime;
  m->ctime = i->ctime;
  m->change_attr = i->change_attr;
  m->time_warp_seq = i->time_warp_seq;

  // inline data only if the client's cached version is older
  if (cap->client_inline_version < i->inline_data.version) {
    m->inline_version = cap->client_inline_version = i->inline_data.version;
    if (i->inline_data.length() > 0)
      m->inline_data = i->inline_data.get_data();
  } else {
    m->inline_version = 0;
  }

  // max_size is min of projected, actual.
  uint64_t oldms = oi->client_ranges.count(client) ? oi->client_ranges[client].range.last : 0;
  uint64_t newms = pi->client_ranges.count(client) ? pi->client_ranges[client].range.last : 0;
  m->max_size = MIN(oldms, newms);

  // auth fields
  i = pauth ? pi:oi;
  m->head.mode = i->mode;
  m->head.uid = i->uid;
  m->head.gid = i->gid;

  // link fields
  i = plink ? pi:oi;
  m->head.nlink = i->nlink;

  // xattrs, only when shared and newer than the client's cached version
  i = pxattr ? pi:oi;
  map<string,bufferptr> *ix = pxattr ? get_projected_xattrs() : &xattrs;
  if ((cap->pending() & CEPH_CAP_XATTR_SHARED) &&
      i->xattr_version > cap->client_xattr_version) {
    dout(10) << " including xattrs v " << i->xattr_version << dendl;
    ::encode(*ix, m->xattrbl);
    m->head.xattr_version = i->xattr_version;
    cap->client_xattr_version = i->xattr_version;
  }
}
3498 | ||
3499 | ||
3500 | ||
// Encode the inode's base state (identity, inode_t, symlink, fragtree,
// xattrs, old inodes, damage flags, snap state).  Field order must
// stay in lockstep with _decode_base().
void CInode::_encode_base(bufferlist& bl, uint64_t features)
{
  ::encode(first, bl);
  ::encode(inode, bl, features);
  ::encode(symlink, bl);
  ::encode(dirfragtree, bl);
  ::encode(xattrs, bl);
  ::encode(old_inodes, bl, features);
  ::encode(damage_flags, bl);
  encode_snap(bl);
}
// Counterpart of _encode_base(); decode order must match exactly.
void CInode::_decode_base(bufferlist::iterator& p)
{
  ::decode(first, p);
  ::decode(inode, p);
  ::decode(symlink, p);
  ::decode(dirfragtree, p);
  ::decode(xattrs, p);
  ::decode(old_inodes, p);
  ::decode(damage_flags, p);
  decode_snap(p);
}
3523 | ||
// Encode the full state of every inode lock plus the loner client.
// Order must stay in lockstep with _decode_locks_full().
void CInode::_encode_locks_full(bufferlist& bl)
{
  ::encode(authlock, bl);
  ::encode(linklock, bl);
  ::encode(dirfragtreelock, bl);
  ::encode(filelock, bl);
  ::encode(xattrlock, bl);
  ::encode(snaplock, bl);
  ::encode(nestlock, bl);
  ::encode(flocklock, bl);
  ::encode(policylock, bl);

  ::encode(loner_cap, bl);
}
// Counterpart of _encode_locks_full(); also re-applies the decoded
// loner to the locks' exclusive-client fields.
void CInode::_decode_locks_full(bufferlist::iterator& p)
{
  ::decode(authlock, p);
  ::decode(linklock, p);
  ::decode(dirfragtreelock, p);
  ::decode(filelock, p);
  ::decode(xattrlock, p);
  ::decode(snaplock, p);
  ::decode(nestlock, p);
  ::decode(flocklock, p);
  ::decode(policylock, p);

  ::decode(loner_cap, p);
  set_loner_cap(loner_cap);
  want_loner_cap = loner_cap;  // for now, we'll eval() shortly.
}
3554 | ||
/*
 * Serialize the replica-visible state of each lock, for sending to a
 * peer that is creating/refreshing a replica of this inode.  Order must
 * match _decode_locks_state().
 */
void CInode::_encode_locks_state_for_replica(bufferlist& bl)
{
  authlock.encode_state_for_replica(bl);
  linklock.encode_state_for_replica(bl);
  dirfragtreelock.encode_state_for_replica(bl);
  filelock.encode_state_for_replica(bl);
  nestlock.encode_state_for_replica(bl);
  xattrlock.encode_state_for_replica(bl);
  snaplock.encode_state_for_replica(bl);
  flocklock.encode_state_for_replica(bl);
  policylock.encode_state_for_replica(bl);
}
/*
 * Like _encode_locks_state_for_replica(), but used during MDS rejoin.
 * The scatterlocks (dirfragtree/file/nest) use the rejoin-specific
 * encoding, which takes the replica's rank; the rest encode as for a
 * normal replica.  Order must match _decode_locks_state().
 */
void CInode::_encode_locks_state_for_rejoin(bufferlist& bl, int rep)
{
  authlock.encode_state_for_replica(bl);
  linklock.encode_state_for_replica(bl);
  dirfragtreelock.encode_state_for_rejoin(bl, rep);
  filelock.encode_state_for_rejoin(bl, rep);
  nestlock.encode_state_for_rejoin(bl, rep);
  xattrlock.encode_state_for_replica(bl);
  snaplock.encode_state_for_replica(bl);
  flocklock.encode_state_for_replica(bl);
  policylock.encode_state_for_replica(bl);
}
/*
 * Decode replica lock state produced by _encode_locks_state_for_replica()
 * or _encode_locks_state_for_rejoin().  is_new indicates this replica
 * was just created (vs. being refreshed).
 */
void CInode::_decode_locks_state(bufferlist::iterator& p, bool is_new)
{
  authlock.decode_state(p, is_new);
  linklock.decode_state(p, is_new);
  dirfragtreelock.decode_state(p, is_new);
  filelock.decode_state(p, is_new);
  nestlock.decode_state(p, is_new);
  xattrlock.decode_state(p, is_new);
  snaplock.decode_state(p, is_new);
  flocklock.decode_state(p, is_new);
  policylock.decode_state(p, is_new);
}
/*
 * Decode per-lock state during MDS rejoin.  Contexts that were blocked
 * on these locks are collected into `waiters`; scatterlocks left in an
 * unstable, non-wrlocked state are queued on `eval_locks` so the caller
 * can re-evaluate them once rejoin completes.
 */
void CInode::_decode_locks_rejoin(bufferlist::iterator& p, list<MDSInternalContextBase*>& waiters,
				  list<SimpleLock*>& eval_locks)
{
  authlock.decode_state_rejoin(p, waiters);
  linklock.decode_state_rejoin(p, waiters);
  dirfragtreelock.decode_state_rejoin(p, waiters);
  filelock.decode_state_rejoin(p, waiters);
  nestlock.decode_state_rejoin(p, waiters);
  xattrlock.decode_state_rejoin(p, waiters);
  snaplock.decode_state_rejoin(p, waiters);
  flocklock.decode_state_rejoin(p, waiters);
  policylock.decode_state_rejoin(p, waiters);

  // unstable scatterlocks not held for write need a fresh eval
  if (!dirfragtreelock.is_stable() && !dirfragtreelock.is_wrlocked())
    eval_locks.push_back(&dirfragtreelock);
  if (!filelock.is_stable() && !filelock.is_wrlocked())
    eval_locks.push_back(&filelock);
  if (!nestlock.is_stable() && !nestlock.is_wrlocked())
    eval_locks.push_back(&nestlock);
}
3611 | ||
3612 | ||
3613 | // IMPORT/EXPORT | |
3614 | ||
/*
 * Serialize this inode's full state for migration to another MDS:
 * base metadata, dynamic state bits, popularity, replica map,
 * fragstat/rstat for export-bound dirfrags, full lock state, and file
 * locks.  Versioned with ENCODE_START(5, 4, ...); decode_import() must
 * match field-for-field.  Takes a PIN_TEMPEXPORTING ref that is dropped
 * when the export finishes (finish_export()) or aborts.
 */
void CInode::encode_export(bufferlist& bl)
{
  ENCODE_START(5, 4, bl);
  _encode_base(bl, mdcache->mds->mdsmap->get_up_features());

  ::encode(state, bl);

  ::encode(pop, bl);

  ::encode(replica_map, bl);

  // include scatterlock info for any bounding CDirs
  bufferlist bounding;
  if (inode.is_dir())
    for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
	 p != dirfrags.end();
	 ++p) {
      CDir *dir = p->second;
      if (dir->state_test(CDir::STATE_EXPORTBOUND)) {
	::encode(p->first, bounding);
	::encode(dir->fnode.fragstat, bounding);
	::encode(dir->fnode.accounted_fragstat, bounding);
	::encode(dir->fnode.rstat, bounding);
	::encode(dir->fnode.accounted_rstat, bounding);
	dout(10) << " encoded fragstat/rstat info for " << *dir << dendl;
      }
    }
  ::encode(bounding, bl);

  _encode_locks_full(bl);

  _encode_file_locks(bl);

  ENCODE_FINISH(bl);

  get(PIN_TEMPEXPORTING);
}
3652 | ||
3653 | void CInode::finish_export(utime_t now) | |
3654 | { | |
3655 | state &= MASK_STATE_EXPORT_KEPT; | |
3656 | ||
3657 | pop.zero(now); | |
3658 | ||
3659 | // just in case! | |
3660 | //dirlock.clear_updated(); | |
3661 | ||
3662 | loner_cap = -1; | |
3663 | ||
3664 | put(PIN_TEMPEXPORTING); | |
3665 | } | |
3666 | ||
/*
 * Inverse of encode_export(): absorb an inode migrated from another
 * MDS.  Field order must match encode_export() exactly.  Marks us as
 * auth, re-establishes dirty/replica pins, and selectively takes the
 * sender's bounding-dirfrag stats (only where we are non-auth and the
 * relevant scatterlock is not in MIX, i.e. where the sender's info is
 * authoritative).
 */
void CInode::decode_import(bufferlist::iterator& p,
			   LogSegment *ls)
{
  DECODE_START(5, p);

  _decode_base(p);

  unsigned s;
  ::decode(s, p);
  // become auth; keep only the state bits that migrate with the inode
  state_set(STATE_AUTH | (s & MASK_STATE_EXPORTED));

  if (is_dirty()) {
    get(PIN_DIRTY);
    _mark_dirty(ls);
  }
  if (is_dirty_parent()) {
    get(PIN_DIRTYPARENT);
    _mark_dirty_parent(ls);
  }

  ::decode(pop, ceph_clock_now(), p);

  ::decode(replica_map, p);
  if (!replica_map.empty())
    get(PIN_REPLICATED);
  replica_nonce = 0;

  // decode fragstat info on bounding cdirs
  bufferlist bounding;
  ::decode(bounding, p);
  bufferlist::iterator q = bounding.begin();
  while (!q.end()) {
    frag_t fg;
    ::decode(fg, q);
    CDir *dir = get_dirfrag(fg);
    assert(dir);  // we should have all bounds open

    // Only take the remote's fragstat/rstat if we are non-auth for
    // this dirfrag AND the lock is NOT in a scattered (MIX) state.
    // We know lock is stable, and MIX is the only state in which
    // the inode auth (who sent us this data) may not have the best
    // info.

    // HMM: Are there cases where dir->is_auth() is an insufficient
    // check because the dirfrag is under migration?  That implies
    // it is frozen (and in a SYNC or LOCK state).  FIXME.

    if (dir->is_auth() ||
        filelock.get_state() == LOCK_MIX) {
      dout(10) << " skipped fragstat info for " << *dir << dendl;
      // still must consume the two frag_info_t's from the stream
      frag_info_t f;
      ::decode(f, q);
      ::decode(f, q);
    } else {
      ::decode(dir->fnode.fragstat, q);
      ::decode(dir->fnode.accounted_fragstat, q);
      dout(10) << " took fragstat info for " << *dir << dendl;
    }
    if (dir->is_auth() ||
        nestlock.get_state() == LOCK_MIX) {
      dout(10) << " skipped rstat info for " << *dir << dendl;
      // consume and discard
      nest_info_t n;
      ::decode(n, q);
      ::decode(n, q);
    } else {
      ::decode(dir->fnode.rstat, q);
      ::decode(dir->fnode.accounted_rstat, q);
      dout(10) << " took rstat info for " << *dir << dendl;
    }
  }

  _decode_locks_full(p);

  _decode_file_locks(p);

  DECODE_FINISH(p);
}
3744 | ||
3745 | ||
3746 | void InodeStoreBase::dump(Formatter *f) const | |
3747 | { | |
3748 | inode.dump(f); | |
3749 | f->dump_string("symlink", symlink); | |
3750 | f->open_array_section("old_inodes"); | |
3751 | for (compact_map<snapid_t, old_inode_t>::const_iterator i = old_inodes.begin(); | |
3752 | i != old_inodes.end(); ++i) { | |
3753 | f->open_object_section("old_inode"); | |
3754 | { | |
3755 | // The key is the last snapid, the first is in the old_inode_t | |
3756 | f->dump_int("last", i->first); | |
3757 | i->second.dump(f); | |
3758 | } | |
3759 | f->close_section(); // old_inode | |
3760 | } | |
3761 | f->close_section(); // old_inodes | |
3762 | ||
3763 | f->open_object_section("dirfragtree"); | |
3764 | dirfragtree.dump(f); | |
3765 | f->close_section(); // dirfragtree | |
3766 | } | |
3767 | ||
3768 | ||
3769 | void InodeStore::generate_test_instances(list<InodeStore*> &ls) | |
3770 | { | |
3771 | InodeStore *populated = new InodeStore; | |
3772 | populated->inode.ino = 0xdeadbeef; | |
3773 | populated->symlink = "rhubarb"; | |
3774 | ls.push_back(populated); | |
3775 | } | |
3776 | ||
3777 | void CInode::validate_disk_state(CInode::validated_data *results, | |
3778 | MDSInternalContext *fin) | |
3779 | { | |
3780 | class ValidationContinuation : public MDSContinuation { | |
3781 | public: | |
3782 | MDSInternalContext *fin; | |
3783 | CInode *in; | |
3784 | CInode::validated_data *results; | |
3785 | bufferlist bl; | |
3786 | CInode *shadow_in; | |
3787 | ||
3788 | enum { | |
3789 | START = 0, | |
3790 | BACKTRACE, | |
3791 | INODE, | |
3792 | DIRFRAGS | |
3793 | }; | |
3794 | ||
3795 | ValidationContinuation(CInode *i, | |
3796 | CInode::validated_data *data_r, | |
3797 | MDSInternalContext *fin_) : | |
3798 | MDSContinuation(i->mdcache->mds->server), | |
3799 | fin(fin_), | |
3800 | in(i), | |
3801 | results(data_r), | |
3802 | shadow_in(NULL) { | |
3803 | set_callback(START, static_cast<Continuation::stagePtr>(&ValidationContinuation::_start)); | |
3804 | set_callback(BACKTRACE, static_cast<Continuation::stagePtr>(&ValidationContinuation::_backtrace)); | |
3805 | set_callback(INODE, static_cast<Continuation::stagePtr>(&ValidationContinuation::_inode_disk)); | |
3806 | set_callback(DIRFRAGS, static_cast<Continuation::stagePtr>(&ValidationContinuation::_dirfrags)); | |
3807 | } | |
3808 | ||
3809 | ~ValidationContinuation() override { | |
3810 | delete shadow_in; | |
3811 | } | |
3812 | ||
3813 | /** | |
3814 | * Fetch backtrace and set tag if tag is non-empty | |
3815 | */ | |
3816 | void fetch_backtrace_and_tag(CInode *in, std::string tag, | |
3817 | Context *fin, int *bt_r, bufferlist *bt) | |
3818 | { | |
3819 | const int64_t pool = in->get_backtrace_pool(); | |
3820 | object_t oid = CInode::get_object_name(in->ino(), frag_t(), ""); | |
3821 | ||
3822 | ObjectOperation fetch; | |
3823 | fetch.getxattr("parent", bt, bt_r); | |
3824 | in->mdcache->mds->objecter->read(oid, object_locator_t(pool), fetch, CEPH_NOSNAP, | |
3825 | NULL, 0, fin); | |
3826 | if (!tag.empty()) { | |
3827 | ObjectOperation scrub_tag; | |
3828 | bufferlist tag_bl; | |
3829 | ::encode(tag, tag_bl); | |
3830 | scrub_tag.setxattr("scrub_tag", tag_bl); | |
3831 | SnapContext snapc; | |
3832 | in->mdcache->mds->objecter->mutate(oid, object_locator_t(pool), scrub_tag, snapc, | |
3833 | ceph::real_clock::now(), | |
3834 | 0, NULL); | |
3835 | } | |
3836 | } | |
3837 | ||
3838 | bool _start(int rval) { | |
3839 | if (in->is_dirty()) { | |
3840 | MDCache *mdcache = in->mdcache; | |
3841 | inode_t& inode = in->inode; | |
3842 | dout(20) << "validating a dirty CInode; results will be inconclusive" | |
3843 | << dendl; | |
3844 | } | |
3845 | if (in->is_symlink()) { | |
3846 | // there's nothing to do for symlinks! | |
3847 | return true; | |
3848 | } | |
3849 | ||
3850 | C_OnFinisher *conf = new C_OnFinisher(get_io_callback(BACKTRACE), | |
3851 | in->mdcache->mds->finisher); | |
3852 | ||
3853 | // Whether we have a tag to apply depends on ScrubHeader (if one is | |
3854 | // present) | |
3855 | if (in->scrub_infop) { | |
3856 | // I'm a non-orphan, so look up my ScrubHeader via my linkage | |
3857 | const std::string &tag = in->scrub_infop->header->get_tag(); | |
3858 | // Rather than using the usual CInode::fetch_backtrace, | |
3859 | // use a special variant that optionally writes a tag in the same | |
3860 | // operation. | |
3861 | fetch_backtrace_and_tag(in, tag, conf, | |
3862 | &results->backtrace.ondisk_read_retval, &bl); | |
3863 | } else { | |
3864 | // When we're invoked outside of ScrubStack we might be called | |
3865 | // on an orphaned inode like / | |
3866 | fetch_backtrace_and_tag(in, {}, conf, | |
3867 | &results->backtrace.ondisk_read_retval, &bl); | |
3868 | } | |
3869 | return false; | |
3870 | } | |
3871 | ||
3872 | bool _backtrace(int rval) { | |
3873 | // set up basic result reporting and make sure we got the data | |
3874 | results->performed_validation = true; // at least, some of it! | |
3875 | results->backtrace.checked = true; | |
3876 | ||
3877 | const int64_t pool = in->get_backtrace_pool(); | |
3878 | inode_backtrace_t& memory_backtrace = results->backtrace.memory_value; | |
3879 | in->build_backtrace(pool, memory_backtrace); | |
3880 | bool equivalent, divergent; | |
3881 | int memory_newer; | |
3882 | ||
3883 | MDCache *mdcache = in->mdcache; // For the benefit of dout | |
3884 | const inode_t& inode = in->inode; // For the benefit of dout | |
3885 | ||
3886 | // Ignore rval because it's the result of a FAILOK operation | |
3887 | // from fetch_backtrace_and_tag: the real result is in | |
3888 | // backtrace.ondisk_read_retval | |
3889 | dout(20) << "ondisk_read_retval: " << results->backtrace.ondisk_read_retval << dendl; | |
3890 | if (results->backtrace.ondisk_read_retval != 0) { | |
3891 | results->backtrace.error_str << "failed to read off disk; see retval"; | |
3892 | goto next; | |
3893 | } | |
3894 | ||
3895 | // extract the backtrace, and compare it to a newly-constructed one | |
3896 | try { | |
3897 | bufferlist::iterator p = bl.begin(); | |
3898 | ::decode(results->backtrace.ondisk_value, p); | |
3899 | dout(10) << "decoded " << bl.length() << " bytes of backtrace successfully" << dendl; | |
3900 | } catch (buffer::error&) { | |
3901 | if (results->backtrace.ondisk_read_retval == 0 && rval != 0) { | |
3902 | // Cases where something has clearly gone wrong with the overall | |
3903 | // fetch op, though we didn't get a nonzero rc from the getxattr | |
3904 | // operation. e.g. object missing. | |
3905 | results->backtrace.ondisk_read_retval = rval; | |
3906 | } | |
3907 | results->backtrace.error_str << "failed to decode on-disk backtrace (" | |
3908 | << bl.length() << " bytes)!"; | |
3909 | goto next; | |
3910 | } | |
3911 | ||
3912 | memory_newer = memory_backtrace.compare(results->backtrace.ondisk_value, | |
3913 | &equivalent, &divergent); | |
3914 | ||
3915 | if (divergent || memory_newer < 0) { | |
3916 | // we're divergent, or on-disk version is newer | |
3917 | results->backtrace.error_str << "On-disk backtrace is divergent or newer"; | |
3918 | } else { | |
3919 | results->backtrace.passed = true; | |
3920 | } | |
3921 | next: | |
3922 | ||
3923 | if (!results->backtrace.passed && in->scrub_infop->header->get_repair()) { | |
3924 | std::string path; | |
3925 | in->make_path_string(path); | |
3926 | in->mdcache->mds->clog->warn() << "bad backtrace on inode " << *in | |
3927 | << ", rewriting it at " << path; | |
3928 | in->_mark_dirty_parent(in->mdcache->mds->mdlog->get_current_segment(), | |
3929 | false); | |
3930 | } | |
3931 | ||
3932 | // If the inode's number was free in the InoTable, fix that | |
3933 | // (#15619) | |
3934 | { | |
3935 | InoTable *inotable = mdcache->mds->inotable; | |
3936 | ||
3937 | dout(10) << "scrub: inotable ino = 0x" << std::hex << inode.ino << dendl; | |
3938 | dout(10) << "scrub: inotable free says " | |
3939 | << inotable->is_marked_free(inode.ino) << dendl; | |
3940 | ||
3941 | if (inotable->is_marked_free(inode.ino)) { | |
3942 | LogChannelRef clog = in->mdcache->mds->clog; | |
3943 | clog->error() << "scrub: inode wrongly marked free: 0x" << std::hex | |
3944 | << inode.ino; | |
3945 | ||
3946 | if (in->scrub_infop->header->get_repair()) { | |
3947 | bool repaired = inotable->repair(inode.ino); | |
3948 | if (repaired) { | |
3949 | clog->error() << "inode table repaired for inode: 0x" << std::hex | |
3950 | << inode.ino; | |
3951 | ||
3952 | inotable->save(); | |
3953 | } else { | |
3954 | clog->error() << "Cannot repair inotable while other operations" | |
3955 | " are in progress"; | |
3956 | } | |
3957 | } | |
3958 | } | |
3959 | } | |
3960 | ||
3961 | // quit if we're a file, or kick off directory checks otherwise | |
3962 | // TODO: validate on-disk inode for non-base directories | |
3963 | if (!in->is_dir()) { | |
3964 | return true; | |
3965 | } | |
3966 | ||
3967 | return validate_directory_data(); | |
3968 | } | |
3969 | ||
3970 | bool validate_directory_data() { | |
3971 | assert(in->is_dir()); | |
3972 | ||
3973 | if (in->is_base()) { | |
3974 | shadow_in = new CInode(in->mdcache); | |
3975 | in->mdcache->create_unlinked_system_inode(shadow_in, | |
3976 | in->inode.ino, | |
3977 | in->inode.mode); | |
3978 | shadow_in->fetch(get_internal_callback(INODE)); | |
3979 | return false; | |
3980 | } else { | |
3981 | results->inode.passed = true; | |
3982 | return check_dirfrag_rstats(); | |
3983 | } | |
3984 | } | |
3985 | ||
3986 | bool _inode_disk(int rval) { | |
3987 | results->inode.checked = true; | |
3988 | results->inode.ondisk_read_retval = rval; | |
3989 | results->inode.ondisk_value = shadow_in->inode; | |
3990 | results->inode.memory_value = in->inode; | |
3991 | ||
3992 | inode_t& si = shadow_in->inode; | |
3993 | inode_t& i = in->inode; | |
3994 | if (si.version > i.version) { | |
3995 | // uh, what? | |
3996 | results->inode.error_str << "On-disk inode is newer than in-memory one!"; | |
3997 | goto next; | |
3998 | } else { | |
3999 | bool divergent = false; | |
4000 | int r = i.compare(si, &divergent); | |
4001 | results->inode.passed = !divergent && r >= 0; | |
4002 | if (!results->inode.passed) { | |
4003 | results->inode.error_str << | |
4004 | "On-disk inode is divergent or newer than in-memory one!"; | |
4005 | goto next; | |
4006 | } | |
4007 | } | |
4008 | next: | |
4009 | return check_dirfrag_rstats(); | |
4010 | } | |
4011 | ||
4012 | bool check_dirfrag_rstats() { | |
4013 | MDSGatherBuilder gather(g_ceph_context); | |
4014 | std::list<frag_t> frags; | |
4015 | in->dirfragtree.get_leaves(frags); | |
4016 | for (list<frag_t>::iterator p = frags.begin(); | |
4017 | p != frags.end(); | |
4018 | ++p) { | |
4019 | CDir *dir = in->get_or_open_dirfrag(in->mdcache, *p); | |
4020 | dir->scrub_info(); | |
4021 | if (!dir->scrub_infop->header) | |
4022 | dir->scrub_infop->header = in->scrub_infop->header; | |
4023 | if (dir->is_complete()) { | |
4024 | dir->scrub_local(); | |
4025 | } else { | |
4026 | dir->scrub_infop->need_scrub_local = true; | |
4027 | dir->fetch(gather.new_sub(), false); | |
4028 | } | |
4029 | } | |
4030 | if (gather.has_subs()) { | |
4031 | gather.set_finisher(get_internal_callback(DIRFRAGS)); | |
4032 | gather.activate(); | |
4033 | return false; | |
4034 | } else { | |
4035 | return immediate(DIRFRAGS, 0); | |
4036 | } | |
4037 | } | |
4038 | ||
4039 | bool _dirfrags(int rval) { | |
4040 | int frags_errors = 0; | |
4041 | // basic reporting setup | |
4042 | results->raw_stats.checked = true; | |
4043 | results->raw_stats.ondisk_read_retval = rval; | |
4044 | ||
4045 | results->raw_stats.memory_value.dirstat = in->inode.dirstat; | |
4046 | results->raw_stats.memory_value.rstat = in->inode.rstat; | |
4047 | frag_info_t& dir_info = results->raw_stats.ondisk_value.dirstat; | |
4048 | nest_info_t& nest_info = results->raw_stats.ondisk_value.rstat; | |
4049 | ||
4050 | if (rval != 0) { | |
4051 | results->raw_stats.error_str << "Failed to read dirfrags off disk"; | |
4052 | goto next; | |
4053 | } | |
4054 | ||
4055 | // check each dirfrag... | |
4056 | for (compact_map<frag_t,CDir*>::iterator p = in->dirfrags.begin(); | |
4057 | p != in->dirfrags.end(); | |
4058 | ++p) { | |
4059 | CDir *dir = p->second; | |
4060 | assert(dir->get_version() > 0); | |
4061 | nest_info.add(dir->fnode.accounted_rstat); | |
4062 | dir_info.add(dir->fnode.accounted_fragstat); | |
4063 | if (dir->scrub_infop && | |
4064 | dir->scrub_infop->pending_scrub_error) { | |
4065 | dir->scrub_infop->pending_scrub_error = false; | |
4066 | if (dir->scrub_infop->header->get_repair()) { | |
4067 | results->raw_stats.error_str | |
4068 | << "dirfrag(" << p->first << ") has bad stats (will be fixed); "; | |
4069 | } else { | |
4070 | results->raw_stats.error_str | |
4071 | << "dirfrag(" << p->first << ") has bad stats; "; | |
4072 | } | |
4073 | frags_errors++; | |
4074 | } | |
4075 | } | |
4076 | nest_info.rsubdirs++; // it gets one to account for self | |
4077 | // ...and that their sum matches our inode settings | |
4078 | if (!dir_info.same_sums(in->inode.dirstat) || | |
4079 | !nest_info.same_sums(in->inode.rstat)) { | |
4080 | if (in->scrub_infop && | |
4081 | in->scrub_infop->header->get_repair()) { | |
4082 | results->raw_stats.error_str | |
4083 | << "freshly-calculated rstats don't match existing ones (will be fixed)"; | |
4084 | in->mdcache->repair_inode_stats(in); | |
4085 | } else { | |
4086 | results->raw_stats.error_str | |
4087 | << "freshly-calculated rstats don't match existing ones"; | |
4088 | } | |
4089 | goto next; | |
4090 | } | |
4091 | if (frags_errors > 0) | |
4092 | goto next; | |
4093 | ||
4094 | results->raw_stats.passed = true; | |
4095 | next: | |
4096 | return true; | |
4097 | } | |
4098 | ||
4099 | void _done() override { | |
4100 | if ((!results->raw_stats.checked || results->raw_stats.passed) && | |
4101 | (!results->backtrace.checked || results->backtrace.passed) && | |
4102 | (!results->inode.checked || results->inode.passed)) | |
4103 | results->passed_validation = true; | |
4104 | if (fin) { | |
4105 | fin->complete(get_rval()); | |
4106 | } | |
4107 | } | |
4108 | }; | |
4109 | ||
4110 | ||
4111 | dout(10) << "scrub starting validate_disk_state on " << *this << dendl; | |
4112 | ValidationContinuation *vc = new ValidationContinuation(this, | |
4113 | results, | |
4114 | fin); | |
4115 | vc->begin(); | |
4116 | } | |
4117 | ||
/*
 * Dump the results of validate_disk_state() into a Formatter.
 * NOTE(review): the keys "memoryvalue" (backtrace section) and
 * "memory_value.dirrstat" (raw_stats section) look like typos for
 * "memory_value" / "...dirstat", but they are part of the emitted JSON
 * schema — confirm no external consumer depends on them before renaming.
 */
void CInode::validated_data::dump(Formatter *f) const
{
  f->open_object_section("results");
  {
    f->dump_bool("performed_validation", performed_validation);
    f->dump_bool("passed_validation", passed_validation);
    f->open_object_section("backtrace");
    {
      f->dump_bool("checked", backtrace.checked);
      f->dump_bool("passed", backtrace.passed);
      f->dump_int("read_ret_val", backtrace.ondisk_read_retval);
      f->dump_stream("ondisk_value") << backtrace.ondisk_value;
      f->dump_stream("memoryvalue") << backtrace.memory_value;
      f->dump_string("error_str", backtrace.error_str.str());
    }
    f->close_section(); // backtrace
    f->open_object_section("raw_stats");
    {
      f->dump_bool("checked", raw_stats.checked);
      f->dump_bool("passed", raw_stats.passed);
      f->dump_int("read_ret_val", raw_stats.ondisk_read_retval);
      f->dump_stream("ondisk_value.dirstat") << raw_stats.ondisk_value.dirstat;
      f->dump_stream("ondisk_value.rstat") << raw_stats.ondisk_value.rstat;
      f->dump_stream("memory_value.dirrstat") << raw_stats.memory_value.dirstat;
      f->dump_stream("memory_value.rstat") << raw_stats.memory_value.rstat;
      f->dump_string("error_str", raw_stats.error_str.str());
    }
    f->close_section(); // raw_stats
    // dump failure return code: last failing sub-check wins
    int rc = 0;
    if (backtrace.checked && backtrace.ondisk_read_retval)
      rc = backtrace.ondisk_read_retval;
    if (inode.checked && inode.ondisk_read_retval)
      rc = inode.ondisk_read_retval;
    if (raw_stats.checked && raw_stats.ondisk_read_retval)
      rc = raw_stats.ondisk_read_retval;
    f->dump_int("return_code", rc);
  }
  f->close_section(); // results
}
4158 | ||
/*
 * Dump the full in-memory state of this inode — persisted fields,
 * generic cache-object state, every lock, CInode state flags, client
 * capabilities and per-MDS wanted caps — into a Formatter (used by
 * admin/asok introspection).
 */
void CInode::dump(Formatter *f) const
{
  InodeStoreBase::dump(f);

  MDSCacheObject::dump(f);

  f->open_object_section("versionlock");
  versionlock.dump(f);
  f->close_section();

  f->open_object_section("authlock");
  authlock.dump(f);
  f->close_section();

  f->open_object_section("linklock");
  linklock.dump(f);
  f->close_section();

  f->open_object_section("dirfragtreelock");
  dirfragtreelock.dump(f);
  f->close_section();

  f->open_object_section("filelock");
  filelock.dump(f);
  f->close_section();

  f->open_object_section("xattrlock");
  xattrlock.dump(f);
  f->close_section();

  f->open_object_section("snaplock");
  snaplock.dump(f);
  f->close_section();

  f->open_object_section("nestlock");
  nestlock.dump(f);
  f->close_section();

  f->open_object_section("flocklock");
  flocklock.dump(f);
  f->close_section();

  f->open_object_section("policylock");
  policylock.dump(f);
  f->close_section();

  // one "state" string per set CInode-specific state bit
  f->open_array_section("states");
  MDSCacheObject::dump_states(f);
  if (state_test(STATE_EXPORTING))
    f->dump_string("state", "exporting");
  if (state_test(STATE_OPENINGDIR))
    f->dump_string("state", "openingdir");
  if (state_test(STATE_FREEZING))
    f->dump_string("state", "freezing");
  if (state_test(STATE_FROZEN))
    f->dump_string("state", "frozen");
  if (state_test(STATE_AMBIGUOUSAUTH))
    f->dump_string("state", "ambiguousauth");
  if (state_test(STATE_EXPORTINGCAPS))
    f->dump_string("state", "exportingcaps");
  if (state_test(STATE_NEEDSRECOVER))
    f->dump_string("state", "needsrecover");
  if (state_test(STATE_PURGING))
    f->dump_string("state", "purging");
  if (state_test(STATE_DIRTYPARENT))
    f->dump_string("state", "dirtyparent");
  if (state_test(STATE_DIRTYRSTAT))
    f->dump_string("state", "dirtyrstat");
  if (state_test(STATE_STRAYPINNED))
    f->dump_string("state", "straypinned");
  if (state_test(STATE_FROZENAUTHPIN))
    f->dump_string("state", "frozenauthpin");
  if (state_test(STATE_DIRTYPOOL))
    f->dump_string("state", "dirtypool");
  if (state_test(STATE_ORPHAN))
    f->dump_string("state", "orphan");
  if (state_test(STATE_MISSINGOBJS))
    f->dump_string("state", "missingobjs");
  f->close_section();

  f->open_array_section("client_caps");
  for (map<client_t,Capability*>::const_iterator it = client_caps.begin();
       it != client_caps.end(); ++it) {
    f->open_object_section("client_cap");
    f->dump_int("client_id", it->first.v);
    f->dump_string("pending", ccap_string(it->second->pending()));
    f->dump_string("issued", ccap_string(it->second->issued()));
    f->dump_string("wanted", ccap_string(it->second->wanted()));
    f->dump_string("last_sent", ccap_string(it->second->get_last_sent()));
    f->close_section();
  }
  f->close_section();

  f->dump_int("loner", loner_cap.v);
  f->dump_int("want_loner", want_loner_cap.v);

  f->open_array_section("mds_caps_wanted");
  for (compact_map<int,int>::const_iterator p = mds_caps_wanted.begin();
       p != mds_caps_wanted.end(); ++p) {
    f->open_object_section("mds_cap_wanted");
    f->dump_int("rank", p->first);
    f->dump_string("cap", ccap_string(p->second));
    f->close_section();
  }
  f->close_section();
}
4265 | ||
4266 | /****** Scrub Stuff *****/ | |
4267 | void CInode::scrub_info_create() const | |
4268 | { | |
4269 | dout(25) << __func__ << dendl; | |
4270 | assert(!scrub_infop); | |
4271 | ||
4272 | // break out of const-land to set up implicit initial state | |
4273 | CInode *me = const_cast<CInode*>(this); | |
4274 | inode_t *in = me->get_projected_inode(); | |
4275 | ||
4276 | scrub_info_t *si = new scrub_info_t(); | |
4277 | si->scrub_start_stamp = si->last_scrub_stamp = in->last_scrub_stamp; | |
4278 | si->scrub_start_version = si->last_scrub_version = in->last_scrub_version; | |
4279 | ||
4280 | me->scrub_infop = si; | |
4281 | } | |
4282 | ||
4283 | void CInode::scrub_maybe_delete_info() | |
4284 | { | |
4285 | if (scrub_infop && | |
4286 | !scrub_infop->scrub_in_progress && | |
4287 | !scrub_infop->last_scrub_dirty) { | |
4288 | delete scrub_infop; | |
4289 | scrub_infop = NULL; | |
4290 | } | |
4291 | } | |
4292 | ||
4293 | void CInode::scrub_initialize(CDentry *scrub_parent, | |
4294 | const ScrubHeaderRefConst& header, | |
4295 | MDSInternalContextBase *f) | |
4296 | { | |
4297 | dout(20) << __func__ << " with scrub_version " << get_version() << dendl; | |
4298 | assert(!scrub_is_in_progress()); | |
4299 | scrub_info(); | |
4300 | if (!scrub_infop) | |
4301 | scrub_infop = new scrub_info_t(); | |
4302 | ||
4303 | if (get_projected_inode()->is_dir()) { | |
4304 | // fill in dirfrag_stamps with initial state | |
4305 | std::list<frag_t> frags; | |
4306 | dirfragtree.get_leaves(frags); | |
4307 | for (std::list<frag_t>::iterator i = frags.begin(); | |
4308 | i != frags.end(); | |
4309 | ++i) { | |
4310 | if (header->get_force()) | |
4311 | scrub_infop->dirfrag_stamps[*i].reset(); | |
4312 | else | |
4313 | scrub_infop->dirfrag_stamps[*i]; | |
4314 | } | |
4315 | } | |
4316 | ||
4317 | if (scrub_parent) | |
4318 | scrub_parent->get(CDentry::PIN_SCRUBPARENT); | |
4319 | scrub_infop->scrub_parent = scrub_parent; | |
4320 | scrub_infop->on_finish = f; | |
4321 | scrub_infop->scrub_in_progress = true; | |
4322 | scrub_infop->children_scrubbed = false; | |
4323 | scrub_infop->header = header; | |
4324 | ||
4325 | scrub_infop->scrub_start_version = get_version(); | |
4326 | scrub_infop->scrub_start_stamp = ceph_clock_now(); | |
4327 | // right now we don't handle remote inodes | |
4328 | } | |
4329 | ||
/*
 * Hand out the next dirfrag of this inode that still needs scrubbing in
 * the current pass, marking it as started and returning it via
 * *out_dirfrag.  Returns 0 on success and -ENOTDIR for non-directories.
 * NOTE(review): when no frags remain this returns positive ENOENT while
 * the not-a-dir case returns negative -ENOTDIR — an inconsistent sign
 * convention; callers appear to rely on the positive value, so confirm
 * all call sites before changing it.
 */
int CInode::scrub_dirfrag_next(frag_t* out_dirfrag)
{
  dout(20) << __func__ << dendl;
  assert(scrub_is_in_progress());

  if (!is_dir()) {
    return -ENOTDIR;
  }

  std::map<frag_t, scrub_stamp_info_t>::iterator i =
      scrub_infop->dirfrag_stamps.begin();

  while (i != scrub_infop->dirfrag_stamps.end()) {
    // a frag whose scrub_start_version predates the overall pass has not
    // yet been handed out in this pass; claim it now
    if (i->second.scrub_start_version < scrub_infop->scrub_start_version) {
      i->second.scrub_start_version = get_projected_version();
      i->second.scrub_start_stamp = ceph_clock_now();
      *out_dirfrag = i->first;
      dout(20) << " return frag " << *out_dirfrag << dendl;
      return 0;
    }
    ++i;
  }

  dout(20) << " no frags left, ENOENT " << dendl;
  return ENOENT;
}
4356 | ||
4357 | void CInode::scrub_dirfrags_scrubbing(list<frag_t>* out_dirfrags) | |
4358 | { | |
4359 | assert(out_dirfrags != NULL); | |
4360 | assert(scrub_infop != NULL); | |
4361 | ||
4362 | out_dirfrags->clear(); | |
4363 | std::map<frag_t, scrub_stamp_info_t>::iterator i = | |
4364 | scrub_infop->dirfrag_stamps.begin(); | |
4365 | ||
4366 | while (i != scrub_infop->dirfrag_stamps.end()) { | |
4367 | if (i->second.scrub_start_version >= scrub_infop->scrub_start_version) { | |
4368 | if (i->second.last_scrub_version < scrub_infop->scrub_start_version) | |
4369 | out_dirfrags->push_back(i->first); | |
4370 | } else { | |
4371 | return; | |
4372 | } | |
4373 | ||
4374 | ++i; | |
4375 | } | |
4376 | } | |
4377 | ||
4378 | void CInode::scrub_dirfrag_finished(frag_t dirfrag) | |
4379 | { | |
4380 | dout(20) << __func__ << " on frag " << dirfrag << dendl; | |
4381 | assert(scrub_is_in_progress()); | |
4382 | ||
4383 | std::map<frag_t, scrub_stamp_info_t>::iterator i = | |
4384 | scrub_infop->dirfrag_stamps.find(dirfrag); | |
4385 | assert(i != scrub_infop->dirfrag_stamps.end()); | |
4386 | ||
4387 | scrub_stamp_info_t &si = i->second; | |
4388 | si.last_scrub_stamp = si.scrub_start_stamp; | |
4389 | si.last_scrub_version = si.scrub_start_version; | |
4390 | } | |
4391 | ||
4392 | void CInode::scrub_finished(MDSInternalContextBase **c) { | |
4393 | dout(20) << __func__ << dendl; | |
4394 | assert(scrub_is_in_progress()); | |
4395 | for (std::map<frag_t, scrub_stamp_info_t>::iterator i = | |
4396 | scrub_infop->dirfrag_stamps.begin(); | |
4397 | i != scrub_infop->dirfrag_stamps.end(); | |
4398 | ++i) { | |
4399 | if(i->second.last_scrub_version != i->second.scrub_start_version) { | |
4400 | derr << i->second.last_scrub_version << " != " | |
4401 | << i->second.scrub_start_version << dendl; | |
4402 | } | |
4403 | assert(i->second.last_scrub_version == i->second.scrub_start_version); | |
4404 | } | |
4405 | ||
4406 | scrub_infop->last_scrub_version = scrub_infop->scrub_start_version; | |
4407 | scrub_infop->last_scrub_stamp = scrub_infop->scrub_start_stamp; | |
4408 | scrub_infop->last_scrub_dirty = true; | |
4409 | scrub_infop->scrub_in_progress = false; | |
4410 | ||
4411 | if (scrub_infop->scrub_parent) { | |
4412 | CDentry *dn = scrub_infop->scrub_parent; | |
4413 | scrub_infop->scrub_parent = NULL; | |
4414 | dn->dir->scrub_dentry_finished(dn); | |
4415 | dn->put(CDentry::PIN_SCRUBPARENT); | |
4416 | } | |
4417 | ||
4418 | *c = scrub_infop->on_finish; | |
4419 | scrub_infop->on_finish = NULL; | |
4420 | ||
4421 | if (scrub_infop->header->get_origin() == this) { | |
4422 | // We are at the point that a tagging scrub was initiated | |
4423 | LogChannelRef clog = mdcache->mds->clog; | |
4424 | clog->info() << "scrub complete with tag '" << scrub_infop->header->get_tag() << "'"; | |
4425 | } | |
4426 | } | |
4427 | ||
4428 | int64_t CInode::get_backtrace_pool() const | |
4429 | { | |
4430 | if (is_dir()) { | |
4431 | return mdcache->mds->mdsmap->get_metadata_pool(); | |
4432 | } else { | |
4433 | // Files are required to have an explicit layout that specifies | |
4434 | // a pool | |
4435 | assert(inode.layout.pool_id != -1); | |
4436 | return inode.layout.pool_id; | |
4437 | } | |
4438 | } | |
4439 | ||
31f18b77 FG |
4440 | void CInode::maybe_export_pin(bool update) |
4441 | { | |
4442 | if (!g_conf->mds_bal_export_pin) | |
4443 | return; | |
4444 | if (!is_dir() || !is_normal()) | |
4445 | return; | |
7c673cae | 4446 | |
31f18b77 FG |
4447 | mds_rank_t export_pin = get_export_pin(false); |
4448 | if (export_pin == MDS_RANK_NONE && !update) | |
4449 | return; | |
7c673cae | 4450 | |
31f18b77 FG |
4451 | if (state_test(CInode::STATE_QUEUEDEXPORTPIN)) |
4452 | return; | |
4453 | ||
4454 | bool queue = false; | |
4455 | for (auto p = dirfrags.begin(); p != dirfrags.end(); p++) { | |
4456 | CDir *dir = p->second; | |
4457 | if (!dir->is_auth()) | |
4458 | continue; | |
4459 | if (export_pin != MDS_RANK_NONE) { | |
4460 | if (dir->is_subtree_root()) { | |
4461 | // set auxsubtree bit or export it | |
4462 | if (!dir->state_test(CDir::STATE_AUXSUBTREE) || | |
4463 | export_pin != dir->get_dir_auth().first) | |
4464 | queue = true; | |
4465 | } else { | |
4466 | // create aux subtree or export it | |
4467 | queue = true; | |
7c673cae | 4468 | } |
31f18b77 FG |
4469 | } else { |
4470 | // clear aux subtrees ? | |
4471 | queue = dir->state_test(CDir::STATE_AUXSUBTREE); | |
4472 | } | |
4473 | if (queue) { | |
4474 | state_set(CInode::STATE_QUEUEDEXPORTPIN); | |
7c673cae | 4475 | mdcache->export_pin_queue.insert(this); |
31f18b77 | 4476 | break; |
7c673cae FG |
4477 | } |
4478 | } | |
4479 | } | |
4480 | ||
4481 | void CInode::set_export_pin(mds_rank_t rank) | |
4482 | { | |
4483 | assert(is_dir()); | |
4484 | assert(is_projected()); | |
4485 | get_projected_inode()->export_pin = rank; | |
31f18b77 | 4486 | maybe_export_pin(true); |
7c673cae FG |
4487 | } |
4488 | ||
4489 | mds_rank_t CInode::get_export_pin(bool inherit) const | |
4490 | { | |
4491 | /* An inode that is export pinned may not necessarily be a subtree root, we | |
4492 | * need to traverse the parents. A base or system inode cannot be pinned. | |
4493 | * N.B. inodes not yet linked into a dir (i.e. anonymous inodes) will not | |
4494 | * have a parent yet. | |
4495 | */ | |
4496 | for (const CInode *in = this; !in->is_base() && !in->is_system() && in->get_projected_parent_dn(); in = in->get_projected_parent_dn()->dir->inode) { | |
4497 | mds_rank_t pin = in->get_projected_inode()->export_pin; | |
4498 | if (pin >= 0) { | |
4499 | return pin; | |
4500 | } | |
4501 | if (!inherit) break; | |
4502 | } | |
4503 | return MDS_RANK_NONE; | |
4504 | } | |
4505 | ||
4506 | bool CInode::is_exportable(mds_rank_t dest) const | |
4507 | { | |
4508 | mds_rank_t pin = get_export_pin(); | |
4509 | if (pin == dest) { | |
4510 | return true; | |
4511 | } else if (pin >= 0) { | |
4512 | return false; | |
4513 | } else { | |
4514 | return true; | |
4515 | } | |
4516 | } |