]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | /* | |
4 | * Ceph - scalable distributed file system | |
5 | * | |
6 | * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> | |
7 | * | |
8 | * This is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License version 2.1, as published by the Free Software | |
11 | * Foundation. See file COPYING. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include "include/int_types.h" | |
16 | #include "common/errno.h" | |
17 | ||
18 | #include <string> | |
19 | #include <stdio.h> | |
20 | ||
21 | #include "CInode.h" | |
22 | #include "CDir.h" | |
23 | #include "CDentry.h" | |
24 | ||
25 | #include "MDSRank.h" | |
26 | #include "MDCache.h" | |
27 | #include "MDLog.h" | |
28 | #include "Locker.h" | |
29 | #include "Mutation.h" | |
30 | ||
31 | #include "events/EUpdate.h" | |
32 | ||
33 | #include "osdc/Objecter.h" | |
34 | ||
35 | #include "snap.h" | |
36 | ||
37 | #include "LogSegment.h" | |
38 | ||
39 | #include "common/Clock.h" | |
40 | ||
41 | #include "messages/MLock.h" | |
42 | #include "messages/MClientCaps.h" | |
43 | ||
44 | #include "common/config.h" | |
45 | #include "global/global_context.h" | |
46 | #include "include/assert.h" | |
47 | ||
48 | #include "mds/MDSContinuation.h" | |
49 | #include "mds/InoTable.h" | |
50 | ||
51 | #define dout_context g_ceph_context | |
52 | #define dout_subsys ceph_subsys_mds | |
53 | #undef dout_prefix | |
54 | #define dout_prefix *_dout << "mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") " | |
55 | ||
56 | ||
// Base class for I/O completion callbacks that hold a CInode; resolves
// the owning MDSRank through the inode's MDCache pointer.
class CInodeIOContext : public MDSIOContextBase
{
protected:
  CInode *in;  // inode this I/O pertains to (never NULL)
  MDSRank *get_mds() override {return in->mdcache->mds;}
public:
  explicit CInodeIOContext(CInode *in_) : in(in_) {
    assert(in != NULL);
  }
};
67 | ||
68 | ||
// Static LockType descriptors shared by all CInodes, one per lock id.
LockType CInode::versionlock_type(CEPH_LOCK_IVERSION);
LockType CInode::authlock_type(CEPH_LOCK_IAUTH);
LockType CInode::linklock_type(CEPH_LOCK_ILINK);
LockType CInode::dirfragtreelock_type(CEPH_LOCK_IDFT);
LockType CInode::filelock_type(CEPH_LOCK_IFILE);
LockType CInode::xattrlock_type(CEPH_LOCK_IXATTR);
LockType CInode::snaplock_type(CEPH_LOCK_ISNAP);
LockType CInode::nestlock_type(CEPH_LOCK_INEST);
LockType CInode::flocklock_type(CEPH_LOCK_IFLOCK);
LockType CInode::policylock_type(CEPH_LOCK_IPOLICY);
79 | ||
80 | //int cinode_pins[CINODE_NUM_PINS]; // counts | |
81 | ostream& CInode::print_db_line_prefix(ostream& out) | |
82 | { | |
83 | return out << ceph_clock_now() << " mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") "; | |
84 | } | |
85 | ||
86 | /* | |
87 | * write caps and lock ids | |
88 | */ | |
// Lock-id / cap-bit pairs (see "write caps and lock ids" comment above):
// each entry names the client capability bits associated with one lock.
struct cinode_lock_info_t cinode_lock_info[] = {
  { CEPH_LOCK_IFILE, CEPH_CAP_ANY_FILE_WR },
  { CEPH_LOCK_IAUTH, CEPH_CAP_AUTH_EXCL },
  { CEPH_LOCK_ILINK, CEPH_CAP_LINK_EXCL },
  { CEPH_LOCK_IXATTR, CEPH_CAP_XATTR_EXCL },
};
// number of entries in the table above
int num_cinode_locks = sizeof(cinode_lock_info) / sizeof(cinode_lock_info[0]);
96 | ||
97 | ||
98 | ||
99 | ostream& operator<<(ostream& out, const CInode& in) | |
100 | { | |
101 | string path; | |
102 | in.make_path_string(path, true); | |
103 | ||
104 | out << "[inode " << in.inode.ino; | |
105 | out << " [" | |
106 | << (in.is_multiversion() ? "...":"") | |
107 | << in.first << "," << in.last << "]"; | |
108 | out << " " << path << (in.is_dir() ? "/":""); | |
109 | ||
110 | if (in.is_auth()) { | |
111 | out << " auth"; | |
112 | if (in.is_replicated()) | |
113 | out << in.get_replicas(); | |
114 | } else { | |
115 | mds_authority_t a = in.authority(); | |
116 | out << " rep@" << a.first; | |
117 | if (a.second != CDIR_AUTH_UNKNOWN) | |
118 | out << "," << a.second; | |
119 | out << "." << in.get_replica_nonce(); | |
120 | } | |
121 | ||
122 | if (in.is_symlink()) | |
123 | out << " symlink='" << in.symlink << "'"; | |
124 | if (in.is_dir() && !in.dirfragtree.empty()) | |
125 | out << " " << in.dirfragtree; | |
126 | ||
127 | out << " v" << in.get_version(); | |
128 | if (in.get_projected_version() > in.get_version()) | |
129 | out << " pv" << in.get_projected_version(); | |
130 | ||
131 | if (in.is_auth_pinned()) { | |
132 | out << " ap=" << in.get_num_auth_pins() << "+" << in.get_num_nested_auth_pins(); | |
133 | #ifdef MDS_AUTHPIN_SET | |
134 | out << "(" << in.auth_pin_set << ")"; | |
135 | #endif | |
136 | } | |
137 | ||
138 | if (in.snaprealm) | |
139 | out << " snaprealm=" << in.snaprealm; | |
140 | ||
141 | if (in.state_test(CInode::STATE_AMBIGUOUSAUTH)) out << " AMBIGAUTH"; | |
142 | if (in.state_test(CInode::STATE_NEEDSRECOVER)) out << " needsrecover"; | |
143 | if (in.state_test(CInode::STATE_RECOVERING)) out << " recovering"; | |
144 | if (in.state_test(CInode::STATE_DIRTYPARENT)) out << " dirtyparent"; | |
145 | if (in.state_test(CInode::STATE_MISSINGOBJS)) out << " missingobjs"; | |
146 | if (in.is_freezing_inode()) out << " FREEZING=" << in.auth_pin_freeze_allowance; | |
147 | if (in.is_frozen_inode()) out << " FROZEN"; | |
148 | if (in.is_frozen_auth_pin()) out << " FROZEN_AUTHPIN"; | |
149 | ||
150 | const inode_t *pi = in.get_projected_inode(); | |
151 | if (pi->is_truncating()) | |
152 | out << " truncating(" << pi->truncate_from << " to " << pi->truncate_size << ")"; | |
153 | ||
154 | if (in.inode.is_dir()) { | |
155 | out << " " << in.inode.dirstat; | |
156 | if (g_conf->mds_debug_scatterstat && in.is_projected()) { | |
157 | const inode_t *pi = in.get_projected_inode(); | |
158 | out << "->" << pi->dirstat; | |
159 | } | |
160 | } else { | |
161 | out << " s=" << in.inode.size; | |
162 | if (in.inode.nlink != 1) | |
163 | out << " nl=" << in.inode.nlink; | |
164 | } | |
165 | ||
166 | // rstat | |
167 | out << " " << in.inode.rstat; | |
168 | if (!(in.inode.rstat == in.inode.accounted_rstat)) | |
169 | out << "/" << in.inode.accounted_rstat; | |
170 | if (g_conf->mds_debug_scatterstat && in.is_projected()) { | |
171 | const inode_t *pi = in.get_projected_inode(); | |
172 | out << "->" << pi->rstat; | |
173 | if (!(pi->rstat == pi->accounted_rstat)) | |
174 | out << "/" << pi->accounted_rstat; | |
175 | } | |
176 | ||
177 | if (!in.client_need_snapflush.empty()) | |
178 | out << " need_snapflush=" << in.client_need_snapflush; | |
179 | ||
180 | ||
181 | // locks | |
182 | if (!in.authlock.is_sync_and_unlocked()) | |
183 | out << " " << in.authlock; | |
184 | if (!in.linklock.is_sync_and_unlocked()) | |
185 | out << " " << in.linklock; | |
186 | if (in.inode.is_dir()) { | |
187 | if (!in.dirfragtreelock.is_sync_and_unlocked()) | |
188 | out << " " << in.dirfragtreelock; | |
189 | if (!in.snaplock.is_sync_and_unlocked()) | |
190 | out << " " << in.snaplock; | |
191 | if (!in.nestlock.is_sync_and_unlocked()) | |
192 | out << " " << in.nestlock; | |
193 | if (!in.policylock.is_sync_and_unlocked()) | |
194 | out << " " << in.policylock; | |
195 | } else { | |
196 | if (!in.flocklock.is_sync_and_unlocked()) | |
197 | out << " " << in.flocklock; | |
198 | } | |
199 | if (!in.filelock.is_sync_and_unlocked()) | |
200 | out << " " << in.filelock; | |
201 | if (!in.xattrlock.is_sync_and_unlocked()) | |
202 | out << " " << in.xattrlock; | |
203 | if (!in.versionlock.is_sync_and_unlocked()) | |
204 | out << " " << in.versionlock; | |
205 | ||
206 | // hack: spit out crap on which clients have caps | |
207 | if (in.inode.client_ranges.size()) | |
208 | out << " cr=" << in.inode.client_ranges; | |
209 | ||
210 | if (!in.get_client_caps().empty()) { | |
211 | out << " caps={"; | |
212 | for (map<client_t,Capability*>::const_iterator it = in.get_client_caps().begin(); | |
213 | it != in.get_client_caps().end(); | |
214 | ++it) { | |
215 | if (it != in.get_client_caps().begin()) out << ","; | |
216 | out << it->first << "=" | |
217 | << ccap_string(it->second->pending()); | |
218 | if (it->second->issued() != it->second->pending()) | |
219 | out << "/" << ccap_string(it->second->issued()); | |
220 | out << "/" << ccap_string(it->second->wanted()) | |
221 | << "@" << it->second->get_last_sent(); | |
222 | } | |
223 | out << "}"; | |
224 | if (in.get_loner() >= 0 || in.get_wanted_loner() >= 0) { | |
225 | out << ",l=" << in.get_loner(); | |
226 | if (in.get_loner() != in.get_wanted_loner()) | |
227 | out << "(" << in.get_wanted_loner() << ")"; | |
228 | } | |
229 | } | |
230 | if (!in.get_mds_caps_wanted().empty()) { | |
231 | out << " mcw={"; | |
232 | for (compact_map<int,int>::const_iterator p = in.get_mds_caps_wanted().begin(); | |
233 | p != in.get_mds_caps_wanted().end(); | |
234 | ++p) { | |
235 | if (p != in.get_mds_caps_wanted().begin()) | |
236 | out << ','; | |
237 | out << p->first << '=' << ccap_string(p->second); | |
238 | } | |
239 | out << '}'; | |
240 | } | |
241 | ||
242 | if (in.get_num_ref()) { | |
243 | out << " |"; | |
244 | in.print_pin_set(out); | |
245 | } | |
246 | ||
247 | if (in.inode.export_pin != MDS_RANK_NONE) { | |
248 | out << " export_pin=" << in.inode.export_pin; | |
249 | } | |
250 | ||
251 | out << " " << ∈ | |
252 | out << "]"; | |
253 | return out; | |
254 | } | |
255 | ||
256 | ostream& operator<<(ostream& out, const CInode::scrub_stamp_info_t& si) | |
257 | { | |
258 | out << "{scrub_start_version: " << si.scrub_start_version | |
259 | << ", scrub_start_stamp: " << si.scrub_start_stamp | |
260 | << ", last_scrub_version: " << si.last_scrub_version | |
261 | << ", last_scrub_stamp: " << si.last_scrub_stamp; | |
262 | return out; | |
263 | } | |
264 | ||
265 | ||
266 | ||
// Stream this inode's full debug representation via operator<<.
void CInode::print(ostream& out)
{
  out << *this;
}
271 | ||
272 | ||
273 | ||
// Record that 'client' still has to flush snapped cap data for 'snapid',
// whose old data lives on the cow inode 'snapin'.  The first pending
// flush overall pins this head inode; the first client for a given
// snapid additionally pins the snapped inode.
void CInode::add_need_snapflush(CInode *snapin, snapid_t snapid, client_t client)
{
  dout(10) << "add_need_snapflush client." << client << " snapid " << snapid << " on " << snapin << dendl;

  if (client_need_snapflush.empty()) {
    get(CInode::PIN_NEEDSNAPFLUSH);

    // FIXME: this is non-optimal, as we'll block freezes/migrations for potentially
    // long periods waiting for clients to flush their snaps.
    auth_pin(this);   // pin head inode...
  }

  // operator[] creates the per-snap client set on first use
  set<client_t>& clients = client_need_snapflush[snapid];
  if (clients.empty())
    snapin->auth_pin(this);   // ...and pin snapped/old inode!

  clients.insert(client);
}
292 | ||
// Undo add_need_snapflush() for one (snapid, client) pair, releasing the
// auth pins taken there as the per-snap set and the whole map drain empty.
// Unknown snapid/client pairs are logged and ignored.
void CInode::remove_need_snapflush(CInode *snapin, snapid_t snapid, client_t client)
{
  dout(10) << "remove_need_snapflush client." << client << " snapid " << snapid << " on " << snapin << dendl;
  compact_map<snapid_t, std::set<client_t> >::iterator p = client_need_snapflush.find(snapid);
  if (p == client_need_snapflush.end()) {
    dout(10) << " snapid not found" << dendl;
    return;
  }
  if (!p->second.count(client)) {
    dout(10) << " client not found" << dendl;
    return;
  }
  p->second.erase(client);
  if (p->second.empty()) {
    client_need_snapflush.erase(p);
    snapin->auth_unpin(this);  // balances the snapin pin from add_need_snapflush()

    if (client_need_snapflush.empty()) {
      put(CInode::PIN_NEEDSNAPFLUSH);
      auth_unpin(this);        // balances the head-inode pin
    }
  }
}
316 | ||
// After 'in' was cow'ed into 'cowin' (splitting its [first,last] snap
// range), transfer pending-snapflush auth pins for snapids that now fall
// before in->first: pin 'cowin' where the snap is still needed, drop the
// entry otherwise, and unpin 'in' either way.  Returns true if cowin has
// snaps left to flush.
bool CInode::split_need_snapflush(CInode *cowin, CInode *in)
{
  dout(10) << "split_need_snapflush [" << cowin->first << "," << cowin->last << "] for " << *cowin << dendl;
  bool need_flush = false;
  for (compact_map<snapid_t, set<client_t> >::iterator p = client_need_snapflush.lower_bound(cowin->first);
       p != client_need_snapflush.end() && p->first < in->first; ) {
    compact_map<snapid_t, set<client_t> >::iterator q = p;
    ++p;  // advance before possible erase of q
    assert(!q->second.empty());
    if (cowin->last >= q->first) {
      cowin->auth_pin(this);  // snap lands in cowin's range
      need_flush = true;
    } else
      client_need_snapflush.erase(q);
    in->auth_unpin(this);  // 'in' no longer covers this snapid
  }
  return need_flush;
}
335 | ||
// Flag this inode's rstat as dirty and queue it on the parent dir's
// dirty_rstat_inodes list; notify the locker that the parent's nestlock
// has updated scattered state to propagate.
void CInode::mark_dirty_rstat()
{
  if (!state_test(STATE_DIRTYRSTAT)) {
    dout(10) << "mark_dirty_rstat" << dendl;
    state_set(STATE_DIRTYRSTAT);
    get(PIN_DIRTYRSTAT);
    CDentry *dn = get_projected_parent_dn();
    CDir *pdir = dn->dir;
    pdir->dirty_rstat_inodes.push_back(&dirty_rstat_item);

    mdcache->mds->locker->mark_updated_scatterlock(&pdir->inode->nestlock);
  }
}
349 | void CInode::clear_dirty_rstat() | |
350 | { | |
351 | if (state_test(STATE_DIRTYRSTAT)) { | |
352 | dout(10) << "clear_dirty_rstat" << dendl; | |
353 | state_clear(STATE_DIRTYRSTAT); | |
354 | put(PIN_DIRTYRSTAT); | |
355 | dirty_rstat_item.remove_myself(); | |
356 | } | |
357 | } | |
358 | ||
// Start a new projected update of this inode.  Pushes a projected_inode_t
// copied from the newest projected state (or the stable inode if there is
// none) and returns the mutable copy.  If 'px' is non-NULL, the current
// (projected) xattr map is copied into it and ownership of 'px' passes to
// the projection; it is freed in pop_and_dirty_projected_inode().
inode_t *CInode::project_inode(map<string,bufferptr> *px)
{
  if (projected_nodes.empty()) {
    projected_nodes.push_back(new projected_inode_t(new inode_t(inode)));
    if (px)
      *px = xattrs;
  } else {
    projected_nodes.push_back(new projected_inode_t(
        new inode_t(*projected_nodes.back()->inode)));
    if (px)
      *px = *get_projected_xattrs();
  }

  projected_inode_t &pi = *projected_nodes.back();

  if (px) {
    pi.xattrs = px;
    ++num_projected_xattrs;
  }

  // fold any pending scrub results into the projected inode
  if (scrub_infop && scrub_infop->last_scrub_dirty) {
    pi.inode->last_scrub_stamp = scrub_infop->last_scrub_stamp;
    pi.inode->last_scrub_version = scrub_infop->last_scrub_version;
    scrub_infop->last_scrub_dirty = false;
    scrub_maybe_delete_info();
  }
  dout(15) << "project_inode " << pi.inode << dendl;
  return pi.inode;
}
388 | ||
// Commit the oldest projected update: mark the inode dirty in log segment
// 'ls', copy the projected inode (and any projected xattrs/snaprealm)
// into the stable state, and free the projection.
void CInode::pop_and_dirty_projected_inode(LogSegment *ls)
{
  assert(!projected_nodes.empty());
  dout(15) << "pop_and_dirty_projected_inode " << projected_nodes.front()->inode
           << " v" << projected_nodes.front()->inode->version << dendl;
  int64_t old_pool = inode.layout.pool_id;

  mark_dirty(projected_nodes.front()->inode->version, ls);
  inode = *projected_nodes.front()->inode;

  // if the backtrace changed (e.g. the data pool moved), schedule a rewrite
  if (inode.is_backtrace_updated())
    _mark_dirty_parent(ls, old_pool != inode.layout.pool_id);

  map<string,bufferptr> *px = projected_nodes.front()->xattrs;
  if (px) {
    --num_projected_xattrs;
    xattrs = *px;
    delete px;  // ownership was transferred in project_inode()
  }

  if (projected_nodes.front()->snapnode) {
    pop_projected_snaprealm(projected_nodes.front()->snapnode);
    --num_projected_srnodes;
  }

  delete projected_nodes.front()->inode;
  delete projected_nodes.front();

  projected_nodes.pop_front();
}
419 | ||
// Project a new snaprealm node (sr_t), copying the current projected one
// or creating a fresh realm seeded with 'snapid'.  Requires a projection
// already pushed (projected_nodes non-empty); the new node is attached to
// the newest projection.
sr_t *CInode::project_snaprealm(snapid_t snapid)
{
  sr_t *cur_srnode = get_projected_srnode();
  sr_t *new_srnode;

  if (cur_srnode) {
    new_srnode = new sr_t(*cur_srnode);
  } else {
    new_srnode = new sr_t();
    new_srnode->created = snapid;
    new_srnode->current_parent_since = get_oldest_snap();
  }
  dout(10) << "project_snaprealm " << new_srnode << dendl;
  projected_nodes.back()->snapnode = new_srnode;
  ++num_projected_srnodes;
  return new_srnode;
}
437 | ||
/* if newparent != parent, add parent to past_parents
 if parent DNE, we need to find what the parent actually is and fill that in */
void CInode::project_past_snaprealm_parent(SnapRealm *newparent)
{
  sr_t *new_snap = project_snaprealm();
  SnapRealm *oldparent;
  if (!snaprealm) {
    // no realm yet: inherit from the nearest ancestor realm
    oldparent = find_snaprealm();
    new_snap->seq = oldparent->get_newest_seq();
  }
  else
    oldparent = snaprealm->parent;

  if (newparent != oldparent) {
    // record the old parent for the snap range it covered
    snapid_t oldparentseq = oldparent->get_newest_seq();
    if (oldparentseq + 1 > new_snap->current_parent_since) {
      new_snap->past_parents[oldparentseq].ino = oldparent->inode->ino();
      new_snap->past_parents[oldparentseq].first = new_snap->current_parent_since;
    }
    new_snap->current_parent_since = MAX(oldparentseq, newparent->get_last_created()) + 1;
  }
}
460 | ||
// Install a projected sr_t as the live snaprealm state (opening the realm
// if needed) and free it.  If the set of past parents changed, parents
// are re-opened and cached snaps invalidated.
void CInode::pop_projected_snaprealm(sr_t *next_snaprealm)
{
  assert(next_snaprealm);
  dout(10) << "pop_projected_snaprealm " << next_snaprealm
           << " seq" << next_snaprealm->seq << dendl;
  bool invalidate_cached_snaps = false;
  if (!snaprealm) {
    open_snaprealm();
  } else if (next_snaprealm->past_parents.size() !=
             snaprealm->srnode.past_parents.size()) {
    invalidate_cached_snaps = true;
    // re-open past parents
    snaprealm->_close_parents();

    dout(10) << " realm " << *snaprealm << " past_parents " << snaprealm->srnode.past_parents
             << " -> " << next_snaprealm->past_parents << dendl;
  }
  snaprealm->srnode = *next_snaprealm;
  delete next_snaprealm;

  // we should be able to open these up (or have them already be open).
  bool ok = snaprealm->_open_parents(NULL);
  assert(ok);

  if (invalidate_cached_snaps)
    snaprealm->invalidate_cached_snaps();

  if (snaprealm->parent)
    dout(10) << " realm " << *snaprealm << " parent " << *snaprealm->parent << dendl;
}
491 | ||
492 | ||
493 | // ====== CInode ======= | |
494 | ||
495 | // dirfrags | |
496 | ||
497 | __u32 InodeStoreBase::hash_dentry_name(const string &dn) | |
498 | { | |
499 | int which = inode.dir_layout.dl_dir_hash; | |
500 | if (!which) | |
501 | which = CEPH_STR_HASH_LINUX; | |
502 | assert(ceph_str_hash_valid(which)); | |
503 | return ceph_str_hash(which, dn.data(), dn.length()); | |
504 | } | |
505 | ||
506 | frag_t InodeStoreBase::pick_dirfrag(const string& dn) | |
507 | { | |
508 | if (dirfragtree.empty()) | |
509 | return frag_t(); // avoid the string hash if we can. | |
510 | ||
511 | __u32 h = hash_dentry_name(dn); | |
512 | return dirfragtree[h]; | |
513 | } | |
514 | ||
// Collect open dirfrags that fall under 'fg' into 'ls'.  Returns true
// when the collected dirfrags completely cover 'fg'.
bool CInode::get_dirfrags_under(frag_t fg, list<CDir*>& ls)
{
  bool all = true;
  // fast path: fragtree leaves under fg that are already open
  list<frag_t> fglist;
  dirfragtree.get_leaves_under(fg, fglist);
  for (list<frag_t>::iterator p = fglist.begin(); p != fglist.end(); ++p)
    if (dirfrags.count(*p))
      ls.push_back(dirfrags[*p]);
    else
      all = false;

  if (all)
    return all;

  // slow path: NOTE(review) — presumably handles open dirfrags that are
  // out of sync with the fragtree (mid split/merge); confirm against callers.
  fragtree_t tmpdft;
  tmpdft.force_to_leaf(g_ceph_context, fg);
  for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); p != dirfrags.end(); ++p) {
    tmpdft.force_to_leaf(g_ceph_context, p->first);
    if (fg.contains(p->first) && !dirfragtree.is_leaf(p->first))
      ls.push_back(p->second);
  }

  // coverage check against the temporary tree
  all = true;
  tmpdft.get_leaves_under(fg, fglist);
  for (list<frag_t>::iterator p = fglist.begin(); p != fglist.end(); ++p)
    if (!dirfrags.count(*p)) {
      all = false;
      break;
    }

  return all;
}
547 | ||
548 | void CInode::verify_dirfrags() | |
549 | { | |
550 | bool bad = false; | |
551 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); p != dirfrags.end(); ++p) { | |
552 | if (!dirfragtree.is_leaf(p->first)) { | |
553 | dout(0) << "have open dirfrag " << p->first << " but not leaf in " << dirfragtree | |
554 | << ": " << *p->second << dendl; | |
555 | bad = true; | |
556 | } | |
557 | } | |
558 | assert(!bad); | |
559 | } | |
560 | ||
561 | void CInode::force_dirfrags() | |
562 | { | |
563 | bool bad = false; | |
564 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); p != dirfrags.end(); ++p) { | |
565 | if (!dirfragtree.is_leaf(p->first)) { | |
566 | dout(0) << "have open dirfrag " << p->first << " but not leaf in " << dirfragtree | |
567 | << ": " << *p->second << dendl; | |
568 | bad = true; | |
569 | } | |
570 | } | |
571 | ||
572 | if (bad) { | |
573 | list<frag_t> leaves; | |
574 | dirfragtree.get_leaves(leaves); | |
575 | for (list<frag_t>::iterator p = leaves.begin(); p != leaves.end(); ++p) | |
576 | mdcache->get_force_dirfrag(dirfrag_t(ino(),*p), true); | |
577 | } | |
578 | ||
579 | verify_dirfrags(); | |
580 | } | |
581 | ||
582 | CDir *CInode::get_approx_dirfrag(frag_t fg) | |
583 | { | |
584 | CDir *dir = get_dirfrag(fg); | |
585 | if (dir) return dir; | |
586 | ||
587 | // find a child? | |
588 | list<CDir*> ls; | |
589 | get_dirfrags_under(fg, ls); | |
590 | if (!ls.empty()) | |
591 | return ls.front(); | |
592 | ||
593 | // try parents? | |
594 | while (fg.bits() > 0) { | |
595 | fg = fg.parent(); | |
596 | dir = get_dirfrag(fg); | |
597 | if (dir) return dir; | |
598 | } | |
599 | return NULL; | |
600 | } | |
601 | ||
602 | void CInode::get_dirfrags(list<CDir*>& ls) | |
603 | { | |
604 | // all dirfrags | |
605 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
606 | p != dirfrags.end(); | |
607 | ++p) | |
608 | ls.push_back(p->second); | |
609 | } | |
610 | void CInode::get_nested_dirfrags(list<CDir*>& ls) | |
611 | { | |
612 | // dirfrags in same subtree | |
613 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
614 | p != dirfrags.end(); | |
615 | ++p) | |
616 | if (!p->second->is_subtree_root()) | |
617 | ls.push_back(p->second); | |
618 | } | |
619 | void CInode::get_subtree_dirfrags(list<CDir*>& ls) | |
620 | { | |
621 | // dirfrags that are roots of new subtrees | |
622 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
623 | p != dirfrags.end(); | |
624 | ++p) | |
625 | if (p->second->is_subtree_root()) | |
626 | ls.push_back(p->second); | |
627 | } | |
628 | ||
629 | ||
// Return the dirfrag for 'fg', creating and registering it when absent.
// Creation is only legal on the auth MDS or during journal replay.
CDir *CInode::get_or_open_dirfrag(MDCache *mdcache, frag_t fg)
{
  assert(is_dir());

  // have it?
  CDir *dir = get_dirfrag(fg);
  if (!dir) {
    // create it.
    assert(is_auth() || mdcache->mds->is_any_replay());
    dir = new CDir(this, fg, mdcache, is_auth());
    add_dirfrag(dir);
  }
  return dir;
}
644 | ||
645 | CDir *CInode::add_dirfrag(CDir *dir) | |
646 | { | |
647 | assert(dirfrags.count(dir->dirfrag().frag) == 0); | |
648 | dirfrags[dir->dirfrag().frag] = dir; | |
649 | ||
650 | if (stickydir_ref > 0) { | |
651 | dir->state_set(CDir::STATE_STICKY); | |
652 | dir->get(CDir::PIN_STICKY); | |
653 | } | |
654 | ||
655 | maybe_export_pin(); | |
656 | ||
657 | return dir; | |
658 | } | |
659 | ||
// Tear down the open dirfrag 'fg': drop null dentries, clear its dirty
// flag and any sticky pin, then delete it.  The dirfrag must be fully
// unreferenced by the time it is deleted.
void CInode::close_dirfrag(frag_t fg)
{
  dout(14) << "close_dirfrag " << fg << dendl;
  assert(dirfrags.count(fg));

  CDir *dir = dirfrags[fg];
  dir->remove_null_dentries();

  // clear dirty flag
  if (dir->is_dirty())
    dir->mark_clean();

  if (stickydir_ref > 0) {
    dir->state_clear(CDir::STATE_STICKY);
    dir->put(CDir::PIN_STICKY);
  }

  // dump any remaining dentries, for debugging purposes
  for (CDir::map_t::iterator p = dir->items.begin();
       p != dir->items.end();
       ++p)
    dout(14) << "close_dirfrag LEFTOVER dn " << *p->second << dendl;

  assert(dir->get_num_ref() == 0);
  delete dir;
  dirfrags.erase(fg);
}
687 | ||
// Close every open dirfrag (see close_dirfrag()).
void CInode::close_dirfrags()
{
  while (!dirfrags.empty())
    close_dirfrag(dirfrags.begin()->first);
}
693 | ||
694 | bool CInode::has_subtree_root_dirfrag(int auth) | |
695 | { | |
696 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
697 | p != dirfrags.end(); | |
698 | ++p) | |
699 | if (p->second->is_subtree_root() && | |
700 | (auth == -1 || p->second->dir_auth.first == auth)) | |
701 | return true; | |
702 | return false; | |
703 | } | |
704 | ||
705 | bool CInode::has_subtree_or_exporting_dirfrag() | |
706 | { | |
707 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
708 | p != dirfrags.end(); | |
709 | ++p) | |
710 | if (p->second->is_subtree_root() || | |
711 | p->second->state_test(CDir::STATE_EXPORTING)) | |
712 | return true; | |
713 | return false; | |
714 | } | |
715 | ||
716 | void CInode::get_stickydirs() | |
717 | { | |
718 | if (stickydir_ref == 0) { | |
719 | get(PIN_STICKYDIRS); | |
720 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
721 | p != dirfrags.end(); | |
722 | ++p) { | |
723 | p->second->state_set(CDir::STATE_STICKY); | |
724 | p->second->get(CDir::PIN_STICKY); | |
725 | } | |
726 | } | |
727 | stickydir_ref++; | |
728 | } | |
729 | ||
730 | void CInode::put_stickydirs() | |
731 | { | |
732 | assert(stickydir_ref > 0); | |
733 | stickydir_ref--; | |
734 | if (stickydir_ref == 0) { | |
735 | put(PIN_STICKYDIRS); | |
736 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
737 | p != dirfrags.end(); | |
738 | ++p) { | |
739 | p->second->state_clear(CDir::STATE_STICKY); | |
740 | p->second->put(CDir::PIN_STICKY); | |
741 | } | |
742 | } | |
743 | } | |
744 | ||
745 | ||
746 | ||
747 | ||
748 | ||
749 | // pins | |
750 | ||
// Called on the 0->1 ref transition: keep our primary dentry pinned
// while anything references this inode.
void CInode::first_get()
{
  // pin my dentry?
  if (parent)
    parent->get(CDentry::PIN_INODEPIN);
}
757 | ||
// Called on the 1->0 ref transition: release the primary dentry pin
// taken in first_get().
void CInode::last_put()
{
  // unpin my dentry?
  if (parent)
    parent->put(CDentry::PIN_INODEPIN);
}
764 | ||
// Called when a ref is dropped: if only the dirty/dirty-parent pins
// remain, give stray evaluation a chance to purge this inode.
void CInode::_put()
{
  if (get_num_ref() == (int)is_dirty() + (int)is_dirty_parent())
    mdcache->maybe_eval_stray(this, true);
}
770 | ||
// Track a remote (hard-link) parent dentry; pin while any exist.
void CInode::add_remote_parent(CDentry *p)
{
  if (remote_parents.empty())
    get(PIN_REMOTEPARENT);
  remote_parents.insert(p);
}
// Forget a remote parent dentry; drop the pin when the last one goes.
void CInode::remove_remote_parent(CDentry *p)
{
  remote_parents.erase(p);
  if (remote_parents.empty())
    put(PIN_REMOTEPARENT);
}
783 | ||
784 | ||
785 | ||
786 | ||
787 | CDir *CInode::get_parent_dir() | |
788 | { | |
789 | if (parent) | |
790 | return parent->dir; | |
791 | return NULL; | |
792 | } | |
793 | CDir *CInode::get_projected_parent_dir() | |
794 | { | |
795 | CDentry *p = get_projected_parent_dn(); | |
796 | if (p) | |
797 | return p->dir; | |
798 | return NULL; | |
799 | } | |
800 | CInode *CInode::get_parent_inode() | |
801 | { | |
802 | if (parent) | |
803 | return parent->dir->inode; | |
804 | return NULL; | |
805 | } | |
806 | ||
807 | bool CInode::is_projected_ancestor_of(CInode *other) | |
808 | { | |
809 | while (other) { | |
810 | if (other == this) | |
811 | return true; | |
812 | if (!other->get_projected_parent_dn()) | |
813 | break; | |
814 | other = other->get_projected_parent_dn()->get_dir()->get_inode(); | |
815 | } | |
816 | return false; | |
817 | } | |
818 | ||
819 | /* | |
820 | * Because a non-directory inode may have multiple links, the use_parent | |
821 | * argument allows selecting which parent to use for path construction. This | |
822 | * argument is only meaningful for the final component (i.e. the first of the | |
823 | * nested calls) because directories cannot have multiple hard links. If | |
824 | * use_parent is NULL and projected is true, the primary parent's projected | |
825 | * inode is used all the way up the path chain. Otherwise the primary parent | |
826 | * stable inode is used. | |
827 | */ | |
// Build a human-readable path for this inode into 's' (see the comment
// block above for the use_parent/projected semantics).
void CInode::make_path_string(string& s, bool projected, const CDentry *use_parent) const
{
  if (!use_parent) {
    use_parent = projected ? get_projected_parent_dn() : parent;
  }

  if (use_parent) {
    use_parent->make_path_string(s, projected);
  } else if (is_root()) {
    s = "";  // root renders as the empty path
  } else if (is_mdsdir()) {
    // per-MDS directory: render as ~mds<rank>
    char t[40];
    uint64_t eino(ino());
    eino -= MDS_INO_MDSDIR_OFFSET;
    snprintf(t, sizeof(t), "~mds%" PRId64, eino);
    s = t;
  } else {
    // no parent known: render as #<ino in hex>
    char n[40];
    uint64_t eino(ino());
    snprintf(n, sizeof(n), "#%" PRIx64, eino);
    s += n;
  }
}
851 | ||
// Build a filepath for this inode: via the (projected) parent dentry
// when one exists, else anchored at our own ino (base inodes).
void CInode::make_path(filepath& fp, bool projected) const
{
  const CDentry *use_parent = projected ? get_projected_parent_dn() : parent;
  if (use_parent) {
    assert(!is_base());
    use_parent->make_path(fp, projected);
  } else {
    fp = filepath(ino());
  }
}
862 | ||
863 | void CInode::name_stray_dentry(string& dname) | |
864 | { | |
865 | char s[20]; | |
866 | snprintf(s, sizeof(s), "%llx", (unsigned long long)inode.ino.val); | |
867 | dname = s; | |
868 | } | |
869 | ||
// Reserve the next version for a pending update: predirty the projected
// parent dentry, or simply bump the version for base inodes (which have
// no parent).  Returns the projected version to use.
version_t CInode::pre_dirty()
{
  version_t pv;
  CDentry* _cdentry = get_projected_parent_dn();
  if (_cdentry) {
    pv = _cdentry->pre_dirty(get_projected_version());
    dout(10) << "pre_dirty " << pv << " (current v " << inode.version << ")" << dendl;
  } else {
    assert(is_base());
    pv = get_projected_version() + 1;
  }
  // force update backtrace for old format inode (see inode_t::decode)
  if (inode.backtrace_version == 0 && !projected_nodes.empty()) {
    inode_t *pi = projected_nodes.back()->inode;
    if (pi->backtrace_version == 0)
      pi->update_backtrace(pv);
  }
  return pv;
}
889 | ||
// Set the dirty flag/pin (the first dirtying requires a log segment) and
// (re)queue this inode on the segment's dirty-inode list.
void CInode::_mark_dirty(LogSegment *ls)
{
  if (!state_test(STATE_DIRTY)) {
    state_set(STATE_DIRTY);
    get(PIN_DIRTY);
    assert(ls);
  }

  // move myself to this segment's dirty list
  if (ls)
    ls->dirty_inodes.push_back(&item_dirty);
}
902 | ||
// Bump the inode version to 'pv', mark it dirty in log segment 'ls', and
// propagate dirtiness to the primary dentry.
void CInode::mark_dirty(version_t pv, LogSegment *ls) {

  dout(10) << "mark_dirty " << *this << dendl;

  /*
    NOTE: I may already be dirty, but this fn _still_ needs to be called so that
    the directory is (perhaps newly) dirtied, and so that parent_dir_version is
    updated below.
  */

  // only auth can get dirty.  "dirty" async data in replicas is relative to
  // filelock state, not the dirty flag.
  assert(is_auth());

  // touch my private version
  assert(inode.version < pv);
  inode.version = pv;
  _mark_dirty(ls);

  // mark dentry too
  if (parent)
    parent->mark_dirty(pv, ls);
}
926 | ||
927 | ||
928 | void CInode::mark_clean() | |
929 | { | |
930 | dout(10) << " mark_clean " << *this << dendl; | |
931 | if (state_test(STATE_DIRTY)) { | |
932 | state_clear(STATE_DIRTY); | |
933 | put(PIN_DIRTY); | |
934 | ||
935 | // remove myself from ls dirty list | |
936 | item_dirty.remove_myself(); | |
937 | } | |
938 | } | |
939 | ||
940 | ||
941 | // -------------- | |
942 | // per-inode storage | |
943 | // (currently for root inode only) | |
944 | ||
// Completion for CInode::store(): trampolines the objecter write result
// back into CInode::_stored() along with the version that was written.
struct C_IO_Inode_Stored : public CInodeIOContext {
  version_t version;  // inode version captured at store() time
  Context *fin;       // caller's completion, forwarded to _stored()
  C_IO_Inode_Stored(CInode *i, version_t v, Context *f) : CInodeIOContext(i), version(v), fin(f) {}
  void finish(int r) override {
    in->_stored(r, version, fin);
  }
};
953 | ||
954 | object_t InodeStoreBase::get_object_name(inodeno_t ino, frag_t fg, const char *suffix) | |
955 | { | |
956 | char n[60]; | |
957 | snprintf(n, sizeof(n), "%llx.%08llx%s", (long long unsigned)ino, (long long unsigned)fg, suffix ? suffix : ""); | |
958 | return object_t(n); | |
959 | } | |
960 | ||
// Persist a base inode (root/mdsdir) as its own ".inode" object in the
// metadata pool; fin completes (via _stored) when the write finishes.
void CInode::store(MDSInternalContextBase *fin)
{
  dout(10) << "store " << get_version() << dendl;
  assert(is_base());  // only base inodes are stored standalone

  // drop snapped metadata for snaps that no longer exist
  if (snaprealm)
    purge_stale_snap_data(snaprealm->get_snaps());

  // encode: magic header, then the InodeStore payload
  bufferlist bl;
  string magic = CEPH_FS_ONDISK_MAGIC;
  ::encode(magic, bl);
  encode_store(bl, mdcache->mds->mdsmap->get_up_features());

  // write it.
  SnapContext snapc;
  ObjectOperation m;
  m.write_full(bl);

  object_t oid = CInode::get_object_name(ino(), frag_t(), ".inode");
  object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pool());

  // completion runs on the MDS finisher thread, not the objecter's
  Context *newfin =
    new C_OnFinisher(new C_IO_Inode_Stored(this, get_version(), fin),
		     mdcache->mds->finisher);
  mdcache->mds->objecter->mutate(oid, oloc, m, snapc,
				 ceph::real_clock::now(), 0,
				 newfin);
}
990 | ||
991 | void CInode::_stored(int r, version_t v, Context *fin) | |
992 | { | |
993 | if (r < 0) { | |
994 | dout(1) << "store error " << r << " v " << v << " on " << *this << dendl; | |
995 | mdcache->mds->clog->error() << "failed to store ino " << ino() << " object," | |
996 | << " errno " << r; | |
997 | mdcache->mds->handle_write_error(r); | |
998 | fin->complete(r); | |
999 | return; | |
1000 | } | |
1001 | ||
1002 | dout(10) << "_stored " << v << " on " << *this << dendl; | |
1003 | if (v == get_projected_version()) | |
1004 | mark_clean(); | |
1005 | ||
1006 | fin->complete(0); | |
1007 | } | |
1008 | ||
// Flush this inode's dirty state to the backing store: the backtrace
// (if dirty) and the inode itself — stored standalone for base inodes,
// otherwise by committing the parent dirfrag. fin fires when all
// outstanding writes complete (immediately if nothing is dirty).
void CInode::flush(MDSInternalContextBase *fin)
{
  dout(10) << "flush " << *this << dendl;
  assert(is_auth() && can_auth_pin());

  MDSGatherBuilder gather(g_ceph_context);

  if (is_dirty_parent()) {
    store_backtrace(gather.new_sub());
  }
  if (is_dirty()) {
    if (is_base()) {
      store(gather.new_sub());
    } else {
      parent->dir->commit(0, gather.new_sub());
    }
  }

  if (gather.has_subs()) {
    gather.set_finisher(fin);
    gather.activate();
  } else {
    // nothing dirty; complete synchronously
    fin->complete(0);
  }
}
1034 | ||
// Completion for CInode::fetch(): collects the two candidate encodings
// (old xattr-based in bl, current .inode object in bl2) and hands both
// to _fetched() for decoding.
struct C_IO_Inode_Fetched : public CInodeIOContext {
  bufferlist bl, bl2;  // bl: legacy xattr data; bl2: .inode object data
  Context *fin;        // caller's completion, forwarded to _fetched()
  C_IO_Inode_Fetched(CInode *i, Context *f) : CInodeIOContext(i), fin(f) {}
  void finish(int r) override {
    // Ignore 'r', because we fetch from two places, so r is usually ENOENT
    in->_fetched(bl, bl2, fin);
  }
};
1044 | ||
// Load a base inode from the metadata pool. Reads BOTH the legacy
// location (an "inode" xattr on the dirfrag object) and the current
// ".inode" object in parallel; _fetched() picks whichever has data.
void CInode::fetch(MDSInternalContextBase *fin)
{
  dout(10) << "fetch" << dendl;

  C_IO_Inode_Fetched *c = new C_IO_Inode_Fetched(this, fin);
  C_GatherBuilder gather(g_ceph_context, new C_OnFinisher(c, mdcache->mds->finisher));

  object_t oid = CInode::get_object_name(ino(), frag_t(), "");
  object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pool());

  // Old on-disk format: inode stored in xattr of a dirfrag
  ObjectOperation rd;
  rd.getxattr("inode", &c->bl, NULL);
  mdcache->mds->objecter->read(oid, oloc, rd, CEPH_NOSNAP, (bufferlist*)NULL, 0, gather.new_sub());

  // Current on-disk format: inode stored in a .inode object
  object_t oid2 = CInode::get_object_name(ino(), frag_t(), ".inode");
  mdcache->mds->objecter->read(oid2, oloc, 0, 0, CEPH_NOSNAP, &c->bl2, 0, gather.new_sub());

  gather.activate();
}
1066 | ||
// Completion of fetch(): decode whichever buffer has data, preferring
// the current-format object (bl2) over the legacy xattr data (bl).
// Completes fin with 0 on success, -ENOENT if neither read returned
// data, or -EINVAL on bad magic / decode failure.
void CInode::_fetched(bufferlist& bl, bufferlist& bl2, Context *fin)
{
  dout(10) << "_fetched got " << bl.length() << " and " << bl2.length() << dendl;
  bufferlist::iterator p;
  if (bl2.length()) {
    p = bl2.begin();
  } else if (bl.length()) {
    p = bl.begin();
  } else {
    derr << "No data while reading inode 0x" << std::hex << ino()
         << std::dec << dendl;
    fin->complete(-ENOENT);
    return;
  }

  // Attempt decode
  try {
    string magic;
    ::decode(magic, p);
    dout(10) << " magic is '" << magic << "' (expecting '"
             << CEPH_FS_ONDISK_MAGIC << "')" << dendl;
    if (magic != CEPH_FS_ONDISK_MAGIC) {
      dout(0) << "on disk magic '" << magic << "' != my magic '" << CEPH_FS_ONDISK_MAGIC
              << "'" << dendl;
      fin->complete(-EINVAL);
    } else {
      decode_store(p);
      dout(10) << "_fetched " << *this << dendl;
      fin->complete(0);
    }
  } catch (buffer::error &err) {
    // truncated/corrupt encoding; surface as EINVAL rather than crashing
    derr << "Corrupt inode 0x" << std::hex << ino() << std::dec
         << ": " << err << dendl;
    fin->complete(-EINVAL);
    return;
  }
}
1104 | ||
1105 | void CInode::build_backtrace(int64_t pool, inode_backtrace_t& bt) | |
1106 | { | |
1107 | bt.ino = inode.ino; | |
1108 | bt.ancestors.clear(); | |
1109 | bt.pool = pool; | |
1110 | ||
1111 | CInode *in = this; | |
1112 | CDentry *pdn = get_parent_dn(); | |
1113 | while (pdn) { | |
1114 | CInode *diri = pdn->get_dir()->get_inode(); | |
1115 | bt.ancestors.push_back(inode_backpointer_t(diri->ino(), pdn->name, in->inode.version)); | |
1116 | in = diri; | |
1117 | pdn = in->get_parent_dn(); | |
1118 | } | |
1119 | for (compact_set<int64_t>::iterator i = inode.old_pools.begin(); | |
1120 | i != inode.old_pools.end(); | |
1121 | ++i) { | |
1122 | // don't add our own pool id to old_pools to avoid looping (e.g. setlayout 0, 1, 0) | |
1123 | if (*i != pool) | |
1124 | bt.old_pools.insert(*i); | |
1125 | } | |
1126 | } | |
1127 | ||
// Completion for CInode::store_backtrace(): forwards the objecter
// result to _stored_backtrace() with the backtrace version written.
struct C_IO_Inode_StoredBacktrace : public CInodeIOContext {
  version_t version;  // backtrace_version captured at store time
  Context *fin;       // caller's completion, forwarded (may be NULL)
  C_IO_Inode_StoredBacktrace(CInode *i, version_t v, Context *f) : CInodeIOContext(i), version(v), fin(f) {}
  void finish(int r) override {
    in->_stored_backtrace(r, version, fin);
  }
};
1136 | ||
// Write this inode's backtrace (the "parent" xattr, plus "layout") to
// the first object in its backtrace pool. If the inode has moved pools
// (STATE_DIRTYPOOL), the "parent" xattr in each old pool is also
// updated so readers there are redirected to the current pool.
// Holds an auth pin until _stored_backtrace() runs.
void CInode::store_backtrace(MDSInternalContextBase *fin, int op_prio)
{
  dout(10) << "store_backtrace on " << *this << dendl;
  assert(is_dirty_parent());

  if (op_prio < 0)
    op_prio = CEPH_MSG_PRIO_DEFAULT;

  auth_pin(this);  // released in _stored_backtrace()

  const int64_t pool = get_backtrace_pool();
  inode_backtrace_t bt;
  build_backtrace(pool, bt);
  bufferlist parent_bl;
  ::encode(bt, parent_bl);

  ObjectOperation op;
  op.priority = op_prio;
  op.create(false);  // object may not exist yet; create it
  op.setxattr("parent", parent_bl);

  bufferlist layout_bl;
  ::encode(inode.layout, layout_bl, mdcache->mds->mdsmap->get_up_features());
  op.setxattr("layout", layout_bl);

  SnapContext snapc;
  object_t oid = get_object_name(ino(), frag_t(), "");
  object_locator_t oloc(pool);
  Context *fin2 = new C_OnFinisher(
    new C_IO_Inode_StoredBacktrace(this, inode.backtrace_version, fin),
    mdcache->mds->finisher);

  if (!state_test(STATE_DIRTYPOOL) || inode.old_pools.empty()) {
    // common case: single write to the current pool
    dout(20) << __func__ << ": no dirtypool or no old pools" << dendl;
    mdcache->mds->objecter->mutate(oid, oloc, op, snapc,
				   ceph::real_clock::now(),
				   0, fin2);
    return;
  }

  C_GatherBuilder gather(g_ceph_context, fin2);
  mdcache->mds->objecter->mutate(oid, oloc, op, snapc,
				 ceph::real_clock::now(),
				 0, gather.new_sub());

  // In the case where DIRTYPOOL is set, we update all old pools backtraces
  // such that anyone reading them will see the new pool ID in
  // inode_backtrace_t::pool and go read everything else from there.
  for (compact_set<int64_t>::iterator p = inode.old_pools.begin();
       p != inode.old_pools.end();
       ++p) {
    if (*p == pool)
      continue;

    dout(20) << __func__ << ": updating old pool " << *p << dendl;

    ObjectOperation op;
    op.priority = op_prio;
    op.create(false);
    op.setxattr("parent", parent_bl);

    object_locator_t oloc(*p);
    mdcache->mds->objecter->mutate(oid, oloc, op, snapc,
				   ceph::real_clock::now(),
				   0, gather.new_sub());
  }
  gather.activate();
}
1205 | ||
// Completion of store_backtrace(): releases the auth pin, clears the
// dirty-parent state if the stored backtrace version is still current,
// and reports persistent errors to the cluster log.
void CInode::_stored_backtrace(int r, version_t v, Context *fin)
{
  if (r == -ENOENT) {
    const int64_t pool = get_backtrace_pool();
    bool exists = mdcache->mds->objecter->with_osdmap(
        [pool](const OSDMap &osd_map) {
          return osd_map.have_pg_pool(pool);
        });

    // This ENOENT is because the pool doesn't exist (the user deleted it
    // out from under us), so the backtrace can never be written, so pretend
    // to succeed so that the user can proceed to e.g. delete the file.
    if (!exists) {
      dout(4) << "store_backtrace got ENOENT: a data pool was deleted "
                 "beneath us!" << dendl;
      r = 0;
    }
  }

  if (r < 0) {
    dout(1) << "store backtrace error " << r << " v " << v << dendl;
    mdcache->mds->clog->error() << "failed to store backtrace on ino "
				<< ino() << " object"
				<< ", pool " << get_backtrace_pool()
				<< ", errno " << r;
    mdcache->mds->handle_write_error(r);
    if (fin)
      fin->complete(r);
    return;
  }

  dout(10) << "_stored_backtrace v " << v << dendl;

  auth_unpin(this);  // balances auth_pin in store_backtrace()
  // only clear if no newer backtrace was dirtied while we were writing
  if (v == inode.backtrace_version)
    clear_dirty_parent();
  if (fin)
    fin->complete(0);
}
1245 | ||
// Read this inode's backtrace ("parent" xattr) from its backtrace pool
// into *backtrace; fin fires when the read completes.
void CInode::fetch_backtrace(Context *fin, bufferlist *backtrace)
{
  mdcache->fetch_backtrace(inode.ino, get_backtrace_pool(), *backtrace, fin);
}
1250 | ||
1251 | void CInode::_mark_dirty_parent(LogSegment *ls, bool dirty_pool) | |
1252 | { | |
1253 | if (!state_test(STATE_DIRTYPARENT)) { | |
1254 | dout(10) << "mark_dirty_parent" << dendl; | |
1255 | state_set(STATE_DIRTYPARENT); | |
1256 | get(PIN_DIRTYPARENT); | |
1257 | assert(ls); | |
1258 | } | |
1259 | if (dirty_pool) | |
1260 | state_set(STATE_DIRTYPOOL); | |
1261 | if (ls) | |
1262 | ls->dirty_parent_inodes.push_back(&item_dirty_parent); | |
1263 | } | |
1264 | ||
1265 | void CInode::clear_dirty_parent() | |
1266 | { | |
1267 | if (state_test(STATE_DIRTYPARENT)) { | |
1268 | dout(10) << "clear_dirty_parent" << dendl; | |
1269 | state_clear(STATE_DIRTYPARENT); | |
1270 | state_clear(STATE_DIRTYPOOL); | |
1271 | put(PIN_DIRTYPARENT); | |
1272 | item_dirty_parent.remove_myself(); | |
1273 | } | |
1274 | } | |
1275 | ||
// Validate a directory inode's on-disk backtrace against its in-memory
// parent dentry. On mismatch (or read error), log it and re-dirty the
// backtrace so it gets rewritten.
void CInode::verify_diri_backtrace(bufferlist &bl, int err)
{
  // base inodes have no parent; a dirty backtrace will be rewritten
  // anyway; replicas don't own the backtrace
  if (is_base() || is_dirty_parent() || !is_auth())
    return;

  dout(10) << "verify_diri_backtrace" << dendl;

  if (err == 0) {
    inode_backtrace_t backtrace;
    ::decode(backtrace, bl);
    CDentry *pdn = get_parent_dn();
    // the first ancestor entry must match our current parent link
    if (backtrace.ancestors.empty() ||
	backtrace.ancestors[0].dname != pdn->name ||
	backtrace.ancestors[0].dirino != pdn->get_dir()->ino())
      err = -EINVAL;
  }

  if (err) {
    MDSRank *mds = mdcache->mds;
    mds->clog->error() << "bad backtrace on dir ino " << ino();
    // !"bad backtrace" is false, so this aborts (printing "bad backtrace")
    // exactly when mds_verify_backtrace > 1
    assert(!"bad backtrace" == (g_conf->mds_verify_backtrace > 1));

    // schedule a rewrite of the correct backtrace
    _mark_dirty_parent(mds->mdlog->get_current_segment(), false);
    mds->mdlog->flush();
  }
}
1302 | ||
1303 | // ------------------ | |
1304 | // parent dir | |
1305 | ||
1306 | ||
// Encode the bare (unversioned) InodeStore payload. Field order here is
// the on-disk format and must mirror decode_bare() exactly.
// @param snap_blob pre-encoded snaprealm data, or NULL to store an
//                  empty blob in its place
void InodeStoreBase::encode_bare(bufferlist &bl, uint64_t features,
				 const bufferlist *snap_blob) const
{
  ::encode(inode, bl, features);
  // symlink target is only stored for symlinks
  if (is_symlink())
    ::encode(symlink, bl);
  ::encode(dirfragtree, bl);
  ::encode(xattrs, bl);
  if (snap_blob)
    ::encode(*snap_blob, bl);
  else
    ::encode(bufferlist(), bl);
  ::encode(old_inodes, bl, features);
  ::encode(oldest_snap, bl);
  ::encode(damage_flags, bl);
}
1323 | ||
// Versioned wrapper around encode_bare(): current struct version 6,
// decodable by consumers understanding version 4 or later.
void InodeStoreBase::encode(bufferlist &bl, uint64_t features,
			    const bufferlist *snap_blob) const
{
  ENCODE_START(6, 4, bl);
  encode_bare(bl, features, snap_blob);
  ENCODE_FINISH(bl);
}
1331 | ||
1332 | void CInode::encode_store(bufferlist& bl, uint64_t features) | |
1333 | { | |
1334 | bufferlist snap_blob; | |
1335 | encode_snap_blob(snap_blob); | |
1336 | InodeStoreBase::encode(bl, mdcache->mds->mdsmap->get_up_features(), | |
1337 | &snap_blob); | |
1338 | } | |
1339 | ||
// Decode the bare InodeStore payload; the inverse of encode_bare().
// snap_blob receives the raw snaprealm data for the caller to decode.
// struct_v selects compatibility paths for old encodings.
void InodeStoreBase::decode_bare(bufferlist::iterator &bl,
				 bufferlist& snap_blob, __u8 struct_v)
{
  ::decode(inode, bl);
  if (is_symlink())
    ::decode(symlink, bl);
  ::decode(dirfragtree, bl);
  ::decode(xattrs, bl);
  ::decode(snap_blob, bl);

  ::decode(old_inodes, bl);
  // v2 directories carried an optional default_file_layout blob
  if (struct_v == 2 && inode.is_dir()) {
    bool default_layout_exists;
    ::decode(default_layout_exists, bl);
    if (default_layout_exists) {
      ::decode(struct_v, bl); // this was a default_file_layout
      ::decode(inode.layout, bl); // but we only care about the layout portion
    }
  }

  if (struct_v >= 5) {
    // InodeStore is embedded in dentries without proper versioning, so
    // we consume up to the end of the buffer
    if (!bl.end()) {
      ::decode(oldest_snap, bl);
    }

    if (!bl.end()) {
      ::decode(damage_flags, bl);
    }
  }
}
1372 | ||
1373 | ||
// Versioned wrapper around decode_bare(); accepts legacy encodings back
// to struct version 4 (see DECODE_START_LEGACY_COMPAT_LEN).
void InodeStoreBase::decode(bufferlist::iterator &bl, bufferlist& snap_blob)
{
  DECODE_START_LEGACY_COMPAT_LEN(5, 4, 4, bl);
  decode_bare(bl, snap_blob, struct_v);
  DECODE_FINISH(bl);
}
1380 | ||
// Decode a stored inode (inverse of encode_store), including its
// embedded snaprealm blob.
void CInode::decode_store(bufferlist::iterator& bl)
{
  bufferlist snap_blob;
  InodeStoreBase::decode(bl, snap_blob);
  decode_snap_blob(snap_blob);
}
1387 | ||
1388 | // ------------------ | |
1389 | // locking | |
1390 | ||
// Fill in the cache-object identity (ino + snapid) used to address this
// inode in cache/lock messages.
void CInode::set_object_info(MDSCacheObjectInfo &info)
{
  info.ino = ino();
  info.snapid = last;
}
1396 | ||
1397 | void CInode::encode_lock_state(int type, bufferlist& bl) | |
1398 | { | |
1399 | ::encode(first, bl); | |
1400 | ||
1401 | switch (type) { | |
1402 | case CEPH_LOCK_IAUTH: | |
1403 | ::encode(inode.version, bl); | |
1404 | ::encode(inode.ctime, bl); | |
1405 | ::encode(inode.mode, bl); | |
1406 | ::encode(inode.uid, bl); | |
1407 | ::encode(inode.gid, bl); | |
1408 | break; | |
1409 | ||
1410 | case CEPH_LOCK_ILINK: | |
1411 | ::encode(inode.version, bl); | |
1412 | ::encode(inode.ctime, bl); | |
1413 | ::encode(inode.nlink, bl); | |
1414 | break; | |
1415 | ||
1416 | case CEPH_LOCK_IDFT: | |
1417 | if (is_auth()) { | |
1418 | ::encode(inode.version, bl); | |
1419 | } else { | |
1420 | // treat flushing as dirty when rejoining cache | |
1421 | bool dirty = dirfragtreelock.is_dirty_or_flushing(); | |
1422 | ::encode(dirty, bl); | |
1423 | } | |
1424 | { | |
1425 | // encode the raw tree | |
1426 | ::encode(dirfragtree, bl); | |
1427 | ||
1428 | // also specify which frags are mine | |
1429 | set<frag_t> myfrags; | |
1430 | list<CDir*> dfls; | |
1431 | get_dirfrags(dfls); | |
1432 | for (list<CDir*>::iterator p = dfls.begin(); p != dfls.end(); ++p) | |
1433 | if ((*p)->is_auth()) { | |
1434 | frag_t fg = (*p)->get_frag(); | |
1435 | myfrags.insert(fg); | |
1436 | } | |
1437 | ::encode(myfrags, bl); | |
1438 | } | |
1439 | break; | |
1440 | ||
1441 | case CEPH_LOCK_IFILE: | |
1442 | if (is_auth()) { | |
1443 | ::encode(inode.version, bl); | |
1444 | ::encode(inode.ctime, bl); | |
1445 | ::encode(inode.mtime, bl); | |
1446 | ::encode(inode.atime, bl); | |
1447 | ::encode(inode.time_warp_seq, bl); | |
1448 | if (!is_dir()) { | |
1449 | ::encode(inode.layout, bl, mdcache->mds->mdsmap->get_up_features()); | |
1450 | ::encode(inode.size, bl); | |
1451 | ::encode(inode.truncate_seq, bl); | |
1452 | ::encode(inode.truncate_size, bl); | |
1453 | ::encode(inode.client_ranges, bl); | |
1454 | ::encode(inode.inline_data, bl); | |
1455 | } | |
1456 | } else { | |
1457 | // treat flushing as dirty when rejoining cache | |
1458 | bool dirty = filelock.is_dirty_or_flushing(); | |
1459 | ::encode(dirty, bl); | |
1460 | } | |
1461 | ||
1462 | { | |
1463 | dout(15) << "encode_lock_state inode.dirstat is " << inode.dirstat << dendl; | |
1464 | ::encode(inode.dirstat, bl); // only meaningful if i am auth. | |
1465 | bufferlist tmp; | |
1466 | __u32 n = 0; | |
1467 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1468 | p != dirfrags.end(); | |
1469 | ++p) { | |
1470 | frag_t fg = p->first; | |
1471 | CDir *dir = p->second; | |
1472 | if (is_auth() || dir->is_auth()) { | |
1473 | fnode_t *pf = dir->get_projected_fnode(); | |
1474 | dout(15) << fg << " " << *dir << dendl; | |
1475 | dout(20) << fg << " fragstat " << pf->fragstat << dendl; | |
1476 | dout(20) << fg << " accounted_fragstat " << pf->accounted_fragstat << dendl; | |
1477 | ::encode(fg, tmp); | |
1478 | ::encode(dir->first, tmp); | |
1479 | ::encode(pf->fragstat, tmp); | |
1480 | ::encode(pf->accounted_fragstat, tmp); | |
1481 | n++; | |
1482 | } | |
1483 | } | |
1484 | ::encode(n, bl); | |
1485 | bl.claim_append(tmp); | |
1486 | } | |
1487 | break; | |
1488 | ||
1489 | case CEPH_LOCK_INEST: | |
1490 | if (is_auth()) { | |
1491 | ::encode(inode.version, bl); | |
1492 | } else { | |
1493 | // treat flushing as dirty when rejoining cache | |
1494 | bool dirty = nestlock.is_dirty_or_flushing(); | |
1495 | ::encode(dirty, bl); | |
1496 | } | |
1497 | { | |
1498 | dout(15) << "encode_lock_state inode.rstat is " << inode.rstat << dendl; | |
1499 | ::encode(inode.rstat, bl); // only meaningful if i am auth. | |
1500 | bufferlist tmp; | |
1501 | __u32 n = 0; | |
1502 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1503 | p != dirfrags.end(); | |
1504 | ++p) { | |
1505 | frag_t fg = p->first; | |
1506 | CDir *dir = p->second; | |
1507 | if (is_auth() || dir->is_auth()) { | |
1508 | fnode_t *pf = dir->get_projected_fnode(); | |
1509 | dout(10) << fg << " " << *dir << dendl; | |
1510 | dout(10) << fg << " " << pf->rstat << dendl; | |
1511 | dout(10) << fg << " " << pf->rstat << dendl; | |
1512 | dout(10) << fg << " " << dir->dirty_old_rstat << dendl; | |
1513 | ::encode(fg, tmp); | |
1514 | ::encode(dir->first, tmp); | |
1515 | ::encode(pf->rstat, tmp); | |
1516 | ::encode(pf->accounted_rstat, tmp); | |
1517 | ::encode(dir->dirty_old_rstat, tmp); | |
1518 | n++; | |
1519 | } | |
1520 | } | |
1521 | ::encode(n, bl); | |
1522 | bl.claim_append(tmp); | |
1523 | } | |
1524 | break; | |
1525 | ||
1526 | case CEPH_LOCK_IXATTR: | |
1527 | ::encode(inode.version, bl); | |
1528 | ::encode(inode.ctime, bl); | |
1529 | ::encode(xattrs, bl); | |
1530 | break; | |
1531 | ||
1532 | case CEPH_LOCK_ISNAP: | |
1533 | ::encode(inode.version, bl); | |
1534 | ::encode(inode.ctime, bl); | |
1535 | encode_snap(bl); | |
1536 | break; | |
1537 | ||
1538 | case CEPH_LOCK_IFLOCK: | |
1539 | ::encode(inode.version, bl); | |
1540 | _encode_file_locks(bl); | |
1541 | break; | |
1542 | ||
1543 | case CEPH_LOCK_IPOLICY: | |
1544 | if (inode.is_dir()) { | |
1545 | ::encode(inode.version, bl); | |
1546 | ::encode(inode.ctime, bl); | |
1547 | ::encode(inode.layout, bl, mdcache->mds->mdsmap->get_up_features()); | |
1548 | ::encode(inode.quota, bl); | |
1549 | ::encode(inode.export_pin, bl); | |
1550 | } | |
1551 | break; | |
1552 | ||
1553 | default: | |
1554 | ceph_abort(); | |
1555 | } | |
1556 | } | |
1557 | ||
1558 | ||
1559 | /* for more info on scatterlocks, see comments by Locker::scatter_writebehind */ | |
1560 | ||
1561 | void CInode::decode_lock_state(int type, bufferlist& bl) | |
1562 | { | |
1563 | bufferlist::iterator p = bl.begin(); | |
1564 | utime_t tm; | |
1565 | ||
1566 | snapid_t newfirst; | |
1567 | ::decode(newfirst, p); | |
1568 | ||
1569 | if (!is_auth() && newfirst != first) { | |
1570 | dout(10) << "decode_lock_state first " << first << " -> " << newfirst << dendl; | |
1571 | assert(newfirst > first); | |
1572 | if (!is_multiversion() && parent) { | |
1573 | assert(parent->first == first); | |
1574 | parent->first = newfirst; | |
1575 | } | |
1576 | first = newfirst; | |
1577 | } | |
1578 | ||
1579 | switch (type) { | |
1580 | case CEPH_LOCK_IAUTH: | |
1581 | ::decode(inode.version, p); | |
1582 | ::decode(tm, p); | |
1583 | if (inode.ctime < tm) inode.ctime = tm; | |
1584 | ::decode(inode.mode, p); | |
1585 | ::decode(inode.uid, p); | |
1586 | ::decode(inode.gid, p); | |
1587 | break; | |
1588 | ||
1589 | case CEPH_LOCK_ILINK: | |
1590 | ::decode(inode.version, p); | |
1591 | ::decode(tm, p); | |
1592 | if (inode.ctime < tm) inode.ctime = tm; | |
1593 | ::decode(inode.nlink, p); | |
1594 | break; | |
1595 | ||
1596 | case CEPH_LOCK_IDFT: | |
1597 | if (is_auth()) { | |
1598 | bool replica_dirty; | |
1599 | ::decode(replica_dirty, p); | |
1600 | if (replica_dirty) { | |
1601 | dout(10) << "decode_lock_state setting dftlock dirty flag" << dendl; | |
1602 | dirfragtreelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1603 | } | |
1604 | } else { | |
1605 | ::decode(inode.version, p); | |
1606 | } | |
1607 | { | |
1608 | fragtree_t temp; | |
1609 | ::decode(temp, p); | |
1610 | set<frag_t> authfrags; | |
1611 | ::decode(authfrags, p); | |
1612 | if (is_auth()) { | |
1613 | // auth. believe replica's auth frags only. | |
1614 | for (set<frag_t>::iterator p = authfrags.begin(); p != authfrags.end(); ++p) | |
1615 | if (!dirfragtree.is_leaf(*p)) { | |
1616 | dout(10) << " forcing frag " << *p << " to leaf (split|merge)" << dendl; | |
1617 | dirfragtree.force_to_leaf(g_ceph_context, *p); | |
1618 | dirfragtreelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1619 | } | |
1620 | } else { | |
1621 | // replica. take the new tree, BUT make sure any open | |
1622 | // dirfrags remain leaves (they may have split _after_ this | |
1623 | // dft was scattered, or we may still be be waiting on the | |
1624 | // notify from the auth) | |
1625 | dirfragtree.swap(temp); | |
1626 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1627 | p != dirfrags.end(); | |
1628 | ++p) { | |
1629 | if (!dirfragtree.is_leaf(p->first)) { | |
1630 | dout(10) << " forcing open dirfrag " << p->first << " to leaf (racing with split|merge)" << dendl; | |
1631 | dirfragtree.force_to_leaf(g_ceph_context, p->first); | |
1632 | } | |
1633 | if (p->second->is_auth()) | |
1634 | p->second->state_clear(CDir::STATE_DIRTYDFT); | |
1635 | } | |
1636 | } | |
1637 | if (g_conf->mds_debug_frag) | |
1638 | verify_dirfrags(); | |
1639 | } | |
1640 | break; | |
1641 | ||
1642 | case CEPH_LOCK_IFILE: | |
1643 | if (!is_auth()) { | |
1644 | ::decode(inode.version, p); | |
1645 | ::decode(tm, p); | |
1646 | if (inode.ctime < tm) inode.ctime = tm; | |
1647 | ::decode(inode.mtime, p); | |
1648 | ::decode(inode.atime, p); | |
1649 | ::decode(inode.time_warp_seq, p); | |
1650 | if (!is_dir()) { | |
1651 | ::decode(inode.layout, p); | |
1652 | ::decode(inode.size, p); | |
1653 | ::decode(inode.truncate_seq, p); | |
1654 | ::decode(inode.truncate_size, p); | |
1655 | ::decode(inode.client_ranges, p); | |
1656 | ::decode(inode.inline_data, p); | |
1657 | } | |
1658 | } else { | |
1659 | bool replica_dirty; | |
1660 | ::decode(replica_dirty, p); | |
1661 | if (replica_dirty) { | |
1662 | dout(10) << "decode_lock_state setting filelock dirty flag" << dendl; | |
1663 | filelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1664 | } | |
1665 | } | |
1666 | { | |
1667 | frag_info_t dirstat; | |
1668 | ::decode(dirstat, p); | |
1669 | if (!is_auth()) { | |
1670 | dout(10) << " taking inode dirstat " << dirstat << " for " << *this << dendl; | |
1671 | inode.dirstat = dirstat; // take inode summation if replica | |
1672 | } | |
1673 | __u32 n; | |
1674 | ::decode(n, p); | |
1675 | dout(10) << " ...got " << n << " fragstats on " << *this << dendl; | |
1676 | while (n--) { | |
1677 | frag_t fg; | |
1678 | snapid_t fgfirst; | |
1679 | frag_info_t fragstat; | |
1680 | frag_info_t accounted_fragstat; | |
1681 | ::decode(fg, p); | |
1682 | ::decode(fgfirst, p); | |
1683 | ::decode(fragstat, p); | |
1684 | ::decode(accounted_fragstat, p); | |
1685 | dout(10) << fg << " [" << fgfirst << ",head] " << dendl; | |
1686 | dout(10) << fg << " fragstat " << fragstat << dendl; | |
1687 | dout(20) << fg << " accounted_fragstat " << accounted_fragstat << dendl; | |
1688 | ||
1689 | CDir *dir = get_dirfrag(fg); | |
1690 | if (is_auth()) { | |
1691 | assert(dir); // i am auth; i had better have this dir open | |
1692 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1693 | << " on " << *dir << dendl; | |
1694 | dir->first = fgfirst; | |
1695 | dir->fnode.fragstat = fragstat; | |
1696 | dir->fnode.accounted_fragstat = accounted_fragstat; | |
1697 | dir->first = fgfirst; | |
1698 | if (!(fragstat == accounted_fragstat)) { | |
1699 | dout(10) << fg << " setting filelock updated flag" << dendl; | |
1700 | filelock.mark_dirty(); // ok bc we're auth and caller will handle | |
1701 | } | |
1702 | } else { | |
1703 | if (dir && dir->is_auth()) { | |
1704 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1705 | << " on " << *dir << dendl; | |
1706 | dir->first = fgfirst; | |
1707 | fnode_t *pf = dir->get_projected_fnode(); | |
1708 | finish_scatter_update(&filelock, dir, | |
1709 | inode.dirstat.version, pf->accounted_fragstat.version); | |
1710 | } | |
1711 | } | |
1712 | } | |
1713 | } | |
1714 | break; | |
1715 | ||
1716 | case CEPH_LOCK_INEST: | |
1717 | if (is_auth()) { | |
1718 | bool replica_dirty; | |
1719 | ::decode(replica_dirty, p); | |
1720 | if (replica_dirty) { | |
1721 | dout(10) << "decode_lock_state setting nestlock dirty flag" << dendl; | |
1722 | nestlock.mark_dirty(); // ok bc we're auth and caller will handle | |
1723 | } | |
1724 | } else { | |
1725 | ::decode(inode.version, p); | |
1726 | } | |
1727 | { | |
1728 | nest_info_t rstat; | |
1729 | ::decode(rstat, p); | |
1730 | if (!is_auth()) { | |
1731 | dout(10) << " taking inode rstat " << rstat << " for " << *this << dendl; | |
1732 | inode.rstat = rstat; // take inode summation if replica | |
1733 | } | |
1734 | __u32 n; | |
1735 | ::decode(n, p); | |
1736 | while (n--) { | |
1737 | frag_t fg; | |
1738 | snapid_t fgfirst; | |
1739 | nest_info_t rstat; | |
1740 | nest_info_t accounted_rstat; | |
1741 | compact_map<snapid_t,old_rstat_t> dirty_old_rstat; | |
1742 | ::decode(fg, p); | |
1743 | ::decode(fgfirst, p); | |
1744 | ::decode(rstat, p); | |
1745 | ::decode(accounted_rstat, p); | |
1746 | ::decode(dirty_old_rstat, p); | |
1747 | dout(10) << fg << " [" << fgfirst << ",head]" << dendl; | |
1748 | dout(10) << fg << " rstat " << rstat << dendl; | |
1749 | dout(10) << fg << " accounted_rstat " << accounted_rstat << dendl; | |
1750 | dout(10) << fg << " dirty_old_rstat " << dirty_old_rstat << dendl; | |
1751 | ||
1752 | CDir *dir = get_dirfrag(fg); | |
1753 | if (is_auth()) { | |
1754 | assert(dir); // i am auth; i had better have this dir open | |
1755 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1756 | << " on " << *dir << dendl; | |
1757 | dir->first = fgfirst; | |
1758 | dir->fnode.rstat = rstat; | |
1759 | dir->fnode.accounted_rstat = accounted_rstat; | |
1760 | dir->dirty_old_rstat.swap(dirty_old_rstat); | |
1761 | if (!(rstat == accounted_rstat) || !dir->dirty_old_rstat.empty()) { | |
1762 | dout(10) << fg << " setting nestlock updated flag" << dendl; | |
1763 | nestlock.mark_dirty(); // ok bc we're auth and caller will handle | |
1764 | } | |
1765 | } else { | |
1766 | if (dir && dir->is_auth()) { | |
1767 | dout(10) << fg << " first " << dir->first << " -> " << fgfirst | |
1768 | << " on " << *dir << dendl; | |
1769 | dir->first = fgfirst; | |
1770 | fnode_t *pf = dir->get_projected_fnode(); | |
1771 | finish_scatter_update(&nestlock, dir, | |
1772 | inode.rstat.version, pf->accounted_rstat.version); | |
1773 | } | |
1774 | } | |
1775 | } | |
1776 | } | |
1777 | break; | |
1778 | ||
1779 | case CEPH_LOCK_IXATTR: | |
1780 | ::decode(inode.version, p); | |
1781 | ::decode(tm, p); | |
1782 | if (inode.ctime < tm) inode.ctime = tm; | |
1783 | ::decode(xattrs, p); | |
1784 | break; | |
1785 | ||
1786 | case CEPH_LOCK_ISNAP: | |
1787 | { | |
1788 | ::decode(inode.version, p); | |
1789 | ::decode(tm, p); | |
1790 | if (inode.ctime < tm) inode.ctime = tm; | |
1791 | snapid_t seq = 0; | |
1792 | if (snaprealm) | |
1793 | seq = snaprealm->srnode.seq; | |
1794 | decode_snap(p); | |
1795 | if (snaprealm && snaprealm->srnode.seq != seq) | |
1796 | mdcache->do_realm_invalidate_and_update_notify(this, seq ? CEPH_SNAP_OP_UPDATE:CEPH_SNAP_OP_SPLIT); | |
1797 | } | |
1798 | break; | |
1799 | ||
1800 | case CEPH_LOCK_IFLOCK: | |
1801 | ::decode(inode.version, p); | |
1802 | _decode_file_locks(p); | |
1803 | break; | |
1804 | ||
1805 | case CEPH_LOCK_IPOLICY: | |
1806 | if (inode.is_dir()) { | |
1807 | ::decode(inode.version, p); | |
1808 | ::decode(tm, p); | |
1809 | if (inode.ctime < tm) inode.ctime = tm; | |
1810 | ::decode(inode.layout, p); | |
1811 | ::decode(inode.quota, p); | |
31f18b77 | 1812 | mds_rank_t old_pin = inode.export_pin; |
7c673cae | 1813 | ::decode(inode.export_pin, p); |
31f18b77 | 1814 | maybe_export_pin(old_pin != inode.export_pin); |
7c673cae FG |
1815 | } |
1816 | break; | |
1817 | ||
1818 | default: | |
1819 | ceph_abort(); | |
1820 | } | |
1821 | } | |
1822 | ||
1823 | ||
1824 | bool CInode::is_dirty_scattered() | |
1825 | { | |
1826 | return | |
1827 | filelock.is_dirty_or_flushing() || | |
1828 | nestlock.is_dirty_or_flushing() || | |
1829 | dirfragtreelock.is_dirty_or_flushing(); | |
1830 | } | |
1831 | ||
// Drop dirty state on all three scatterlocks.
// NOTE(review): callers appear responsible for having handled (or
// deliberately discarded) any pending scattered writeback first —
// confirm at call sites.
void CInode::clear_scatter_dirty()
{
  filelock.remove_dirty();
  nestlock.remove_dirty();
  dirfragtreelock.remove_dirty();
}
1838 | ||
1839 | void CInode::clear_dirty_scattered(int type) | |
1840 | { | |
1841 | dout(10) << "clear_dirty_scattered " << type << " on " << *this << dendl; | |
1842 | switch (type) { | |
1843 | case CEPH_LOCK_IFILE: | |
1844 | item_dirty_dirfrag_dir.remove_myself(); | |
1845 | break; | |
1846 | ||
1847 | case CEPH_LOCK_INEST: | |
1848 | item_dirty_dirfrag_nest.remove_myself(); | |
1849 | break; | |
1850 | ||
1851 | case CEPH_LOCK_IDFT: | |
1852 | item_dirty_dirfrag_dirfragtree.remove_myself(); | |
1853 | break; | |
1854 | ||
1855 | default: | |
1856 | ceph_abort(); | |
1857 | } | |
1858 | } | |
1859 | ||
1860 | ||
1861 | /* | |
1862 | * when we initially scatter a lock, we need to check if any of the dirfrags | |
1863 | * have out of date accounted_rstat/fragstat. if so, mark the lock stale. | |
1864 | */ | |
1865 | /* for more info on scatterlocks, see comments by Locker::scatter_writebehind */ | |
1866 | void CInode::start_scatter(ScatterLock *lock) | |
1867 | { | |
1868 | dout(10) << "start_scatter " << *lock << " on " << *this << dendl; | |
1869 | assert(is_auth()); | |
1870 | inode_t *pi = get_projected_inode(); | |
1871 | ||
1872 | for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin(); | |
1873 | p != dirfrags.end(); | |
1874 | ++p) { | |
1875 | frag_t fg = p->first; | |
1876 | CDir *dir = p->second; | |
1877 | fnode_t *pf = dir->get_projected_fnode(); | |
1878 | dout(20) << fg << " " << *dir << dendl; | |
1879 | ||
1880 | if (!dir->is_auth()) | |
1881 | continue; | |
1882 | ||
1883 | switch (lock->get_type()) { | |
1884 | case CEPH_LOCK_IFILE: | |
1885 | finish_scatter_update(lock, dir, pi->dirstat.version, pf->accounted_fragstat.version); | |
1886 | break; | |
1887 | ||
1888 | case CEPH_LOCK_INEST: | |
1889 | finish_scatter_update(lock, dir, pi->rstat.version, pf->accounted_rstat.version); | |
1890 | break; | |
1891 | ||
1892 | case CEPH_LOCK_IDFT: | |
1893 | dir->state_clear(CDir::STATE_DIRTYDFT); | |
1894 | break; | |
1895 | } | |
1896 | } | |
1897 | } | |
1898 | ||
1899 | ||
// Journal-completion context for finish_scatter_update(): once the EUpdate
// recording the refreshed accounted fragstat/rstat is safely on disk, apply
// and clean up the pending mutation via CInode::_finish_frag_update().
class C_Inode_FragUpdate : public MDSLogContextBase {
protected:
  CInode *in;       // inode whose dirfrag stats were journaled
  CDir *dir;        // the dirfrag that was updated
  MutationRef mut;  // mutation holding the projected fnode + auth pin
  MDSRank *get_mds() override {return in->mdcache->mds;}
  void finish(int r) override {
    in->_finish_frag_update(dir, mut);
  }

public:
  C_Inode_FragUpdate(CInode *i, CDir *d, MutationRef& m) : in(i), dir(d), mut(m) {}
};
1913 | ||
// Bring one dirfrag's accounted scatter stat up to date with the inode.
// If the frag is frozen or not loaded we leave it stale (it will catch up
// on a later lock cycle); otherwise, if its accounted version lags the
// inode's, journal an EUpdate that stamps the frag with the inode version.
void CInode::finish_scatter_update(ScatterLock *lock, CDir *dir,
				   version_t inode_version, version_t dir_accounted_version)
{
  frag_t fg = dir->get_frag();
  assert(dir->is_auth());

  if (dir->is_frozen()) {
    // can't project onto a frozen frag; leave it stale for now
    dout(10) << "finish_scatter_update " << fg << " frozen, marking " << *lock << " stale " << *dir << dendl;
  } else if (dir->get_version() == 0) {
    // frag not fetched from disk yet; nothing to reconcile against
    dout(10) << "finish_scatter_update " << fg << " not loaded, marking " << *lock << " stale " << *dir << dendl;
  } else {
    if (dir_accounted_version != inode_version) {
      dout(10) << "finish_scatter_update " << fg << " journaling accounted scatterstat update v" << inode_version << dendl;

      MDLog *mdlog = mdcache->mds->mdlog;
      MutationRef mut(new MutationImpl());
      mut->ls = mdlog->get_current_segment();

      inode_t *pi = get_projected_inode();
      fnode_t *pf = dir->project_fnode();
      pf->version = dir->pre_dirty();

      const char *ename = 0;
      switch (lock->get_type()) {
      case CEPH_LOCK_IFILE:
	// stamp fragstat with the inode's dirstat version and zero the diff
	pf->fragstat.version = pi->dirstat.version;
	pf->accounted_fragstat = pf->fragstat;
	ename = "lock ifile accounted scatter stat update";
	break;
      case CEPH_LOCK_INEST:
	pf->rstat.version = pi->rstat.version;
	pf->accounted_rstat = pf->rstat;
	ename = "lock inest accounted scatter stat update";
	break;
      default:
	ceph_abort();  // only IFILE/INEST carry accounted scatter stats
      }

      mut->add_projected_fnode(dir);

      EUpdate *le = new EUpdate(mdlog, ename);
      mdlog->start_entry(le);
      le->metablob.add_dir_context(dir);
      le->metablob.add_dir(dir, true);

      assert(!dir->is_frozen());
      mut->auth_pin(dir);

      // C_Inode_FragUpdate applies + cleans up the mutation once journaled
      mdlog->submit_entry(le, new C_Inode_FragUpdate(this, dir, mut));
    } else {
      dout(10) << "finish_scatter_update " << fg << " accounted " << *lock
	       << " scatter stat unchanged at v" << dir_accounted_version << dendl;
    }
  }
}
1969 | ||
// Journal-safe callback from C_Inode_FragUpdate: apply the projected
// fnode (and dirty it), then release pins/locks held by the mutation.
void CInode::_finish_frag_update(CDir *dir, MutationRef& mut)
{
  dout(10) << "_finish_frag_update on " << *dir << dendl;
  mut->apply();
  mut->cleanup();
}
1976 | ||
1977 | ||
/*
 * when we gather a lock, we need to assimilate dirfrag changes into the inode
 * state. it's possible we can't update the dirfrag accounted_rstat/fragstat
 * because the frag is auth and frozen, or that the replica couldn't for the same
 * reason. hopefully it will get updated the next time the lock cycles.
 *
 * we have two dimensions of behavior:
 * - we may be (auth and !frozen), and able to update, or not.
 * - the frag may be stale, or not.
 *
 * if the frag is non-stale, we want to assimilate the diff into the
 * inode, regardless of whether it's auth or updateable.
 *
 * if we update the frag, we want to set accounted_fragstat = frag,
 * both if we took the diff or it was stale and we are making it
 * un-stale.
 */
/* for more info on scatterlocks, see comments by Locker::scatter_writebehind */
void CInode::finish_scatter_gather_update(int type)
{
  LogChannelRef clog = mdcache->mds->clog;

  dout(10) << "finish_scatter_gather_update " << type << " on " << *this << dendl;
  assert(is_auth());

  switch (type) {
  case CEPH_LOCK_IFILE:
    {
      // tmpdft tracks which leaves we actually visited so we can tell
      // whether the summed dirstat covers the whole fragtree below.
      fragtree_t tmpdft = dirfragtree;
      struct frag_info_t dirstat;
      bool dirstat_valid = true;

      // adjust summation
      assert(is_auth());
      inode_t *pi = get_projected_inode();

      bool touched_mtime = false, touched_chattr = false;
      dout(20) << " orig dirstat " << pi->dirstat << dendl;
      // bump the version first: frags at (new version - 1) are "fresh"
      pi->dirstat.version++;
      for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
	   p != dirfrags.end();
	   ++p) {
	frag_t fg = p->first;
	CDir *dir = p->second;
	dout(20) << fg << " " << *dir << dendl;

	// we may only project onto auth, unfrozen, loaded frags
	bool update;
	if (dir->get_version() != 0) {
	  update = dir->is_auth() && !dir->is_frozen();
	} else {
	  update = false;
	  dirstat_valid = false;   // unloaded frag => sum is incomplete
	}

	fnode_t *pf = dir->get_projected_fnode();
	if (update)
	  pf = dir->project_fnode();

	if (pf->accounted_fragstat.version == pi->dirstat.version - 1) {
	  // fresh frag: fold its (fragstat - accounted_fragstat) delta in
	  dout(20) << fg << " fragstat " << pf->fragstat << dendl;
	  dout(20) << fg << " accounted_fragstat " << pf->accounted_fragstat << dendl;
	  pi->dirstat.add_delta(pf->fragstat, pf->accounted_fragstat, &touched_mtime, &touched_chattr);
	} else {
	  dout(20) << fg << " skipping STALE accounted_fragstat " << pf->accounted_fragstat << dendl;
	}

	if (pf->fragstat.nfiles < 0 ||
	    pf->fragstat.nsubdirs < 0) {
	  // corruption guard: clamp negative counts (abort only when
	  // mds_verify_scatter is enabled)
	  clog->error() << "bad/negative dir size on "
			<< dir->dirfrag() << " " << pf->fragstat;
	  assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);

	  if (pf->fragstat.nfiles < 0)
	    pf->fragstat.nfiles = 0;
	  if (pf->fragstat.nsubdirs < 0)
	    pf->fragstat.nsubdirs = 0;
	}

	if (update) {
	  // mark the frag fully accounted at the new inode version
	  pf->accounted_fragstat = pf->fragstat;
	  pf->fragstat.version = pf->accounted_fragstat.version = pi->dirstat.version;
	  dout(10) << fg << " updated accounted_fragstat " << pf->fragstat << " on " << *dir << dendl;
	}

	tmpdft.force_to_leaf(g_ceph_context, fg);
	dirstat.add(pf->fragstat);
      }
      if (touched_mtime)
	pi->mtime = pi->ctime = pi->dirstat.mtime;
      if (touched_chattr)
	pi->change_attr = pi->dirstat.change_attr;
      dout(20) << " final dirstat " << pi->dirstat << dendl;

      if (dirstat_valid && !dirstat.same_sums(pi->dirstat)) {
	// only trust the mismatch if we saw every leaf of the fragtree
	list<frag_t> ls;
	tmpdft.get_leaves_under(frag_t(), ls);
	for (list<frag_t>::iterator p = ls.begin(); p != ls.end(); ++p)
	  if (!dirfrags.count(*p)) {
	    dirstat_valid = false;
	    break;
	  }
	if (dirstat_valid) {
	  if (state_test(CInode::STATE_REPAIRSTATS)) {
	    dout(20) << " dirstat mismatch, fixing" << dendl;
	  } else {
	    clog->error() << "unmatched fragstat on " << ino() << ", inode has "
			  << pi->dirstat << ", dirfrags have " << dirstat;
	    assert(!"unmatched fragstat" == g_conf->mds_verify_scatter);
	  }
	  // trust the dirfrags for now
	  version_t v = pi->dirstat.version;
	  if (pi->dirstat.mtime > dirstat.mtime)
	    dirstat.mtime = pi->dirstat.mtime;
	  if (pi->dirstat.change_attr > dirstat.change_attr)
	    dirstat.change_attr = pi->dirstat.change_attr;
	  pi->dirstat = dirstat;
	  pi->dirstat.version = v;
	}
      }

      if (pi->dirstat.nfiles < 0 ||
	  pi->dirstat.nsubdirs < 0) {
	clog->error() << "bad/negative fragstat on " << ino()
		      << ", inode has " << pi->dirstat;
	assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);

	if (pi->dirstat.nfiles < 0)
	  pi->dirstat.nfiles = 0;
	if (pi->dirstat.nsubdirs < 0)
	  pi->dirstat.nsubdirs = 0;
      }
    }
    break;

  case CEPH_LOCK_INEST:
    {
      // same scheme as IFILE above, but for recursive (rstat) accounting
      fragtree_t tmpdft = dirfragtree;
      nest_info_t rstat;
      rstat.rsubdirs = 1;   // count this directory itself

      bool rstat_valid = true;

      // adjust summation
      assert(is_auth());
      inode_t *pi = get_projected_inode();
      dout(20) << " orig rstat " << pi->rstat << dendl;
      pi->rstat.version++;
      for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
	   p != dirfrags.end();
	   ++p) {
	frag_t fg = p->first;
	CDir *dir = p->second;
	dout(20) << fg << " " << *dir << dendl;

	bool update;
	if (dir->get_version() != 0) {
	  update = dir->is_auth() && !dir->is_frozen();
	} else {
	  update = false;
	  rstat_valid = false;
	}

	fnode_t *pf = dir->get_projected_fnode();
	if (update)
	  pf = dir->project_fnode();

	if (pf->accounted_rstat.version == pi->rstat.version-1) {
	  // only pull this frag's dirty rstat inodes into the frag if
	  // the frag is non-stale and updateable. if it's stale,
	  // that info will just get thrown out!
	  if (update)
	    dir->assimilate_dirty_rstat_inodes();

	  dout(20) << fg << " rstat " << pf->rstat << dendl;
	  dout(20) << fg << " accounted_rstat " << pf->accounted_rstat << dendl;
	  dout(20) << fg << " dirty_old_rstat " << dir->dirty_old_rstat << dendl;
	  mdcache->project_rstat_frag_to_inode(pf->rstat, pf->accounted_rstat,
					       dir->first, CEPH_NOSNAP, this, true);
	  // also fold in per-snapshot deltas
	  for (compact_map<snapid_t,old_rstat_t>::iterator q = dir->dirty_old_rstat.begin();
	       q != dir->dirty_old_rstat.end();
	       ++q)
	    mdcache->project_rstat_frag_to_inode(q->second.rstat, q->second.accounted_rstat,
						 q->second.first, q->first, this, true);
	  if (update)  // dir contents not valid if frozen or non-auth
	    dir->check_rstats();
	} else {
	  dout(20) << fg << " skipping STALE accounted_rstat " << pf->accounted_rstat << dendl;
	}
	if (update) {
	  pf->accounted_rstat = pf->rstat;
	  dir->dirty_old_rstat.clear();
	  pf->rstat.version = pf->accounted_rstat.version = pi->rstat.version;
	  dir->check_rstats();
	  dout(10) << fg << " updated accounted_rstat " << pf->rstat << " on " << *dir << dendl;
	}

	tmpdft.force_to_leaf(g_ceph_context, fg);
	rstat.add(pf->rstat);
      }
      dout(20) << " final rstat " << pi->rstat << dendl;

      if (rstat_valid && !rstat.same_sums(pi->rstat)) {
	list<frag_t> ls;
	tmpdft.get_leaves_under(frag_t(), ls);
	for (list<frag_t>::iterator p = ls.begin(); p != ls.end(); ++p)
	  if (!dirfrags.count(*p)) {
	    rstat_valid = false;
	    break;
	  }
	if (rstat_valid) {
	  if (state_test(CInode::STATE_REPAIRSTATS)) {
	    dout(20) << " rstat mismatch, fixing" << dendl;
	  } else {
	    clog->error() << "unmatched rstat on " << ino() << ", inode has "
			  << pi->rstat << ", dirfrags have " << rstat;
	    assert(!"unmatched rstat" == g_conf->mds_verify_scatter);
	  }
	  // trust the dirfrag for now
	  version_t v = pi->rstat.version;
	  if (pi->rstat.rctime > rstat.rctime)
	    rstat.rctime = pi->rstat.rctime;
	  pi->rstat = rstat;
	  pi->rstat.version = v;
	}
      }

      // recursive bytes may have changed; let clients re-evaluate quotas
      mdcache->broadcast_quota_to_client(this);
    }
    break;

  case CEPH_LOCK_IDFT:
    break;   // fragtree changes are journaled elsewhere; nothing to sum

  default:
    ceph_abort();
  }
}
2214 | ||
// Companion to finish_scatter_gather_update(): journal the projected fnodes
// that the gather produced, adding each updateable dirfrag to the caller's
// mutation/metablob. IDFT has no per-frag accounted state, so it is skipped.
void CInode::finish_scatter_gather_update_accounted(int type, MutationRef& mut, EMetaBlob *metablob)
{
  dout(10) << "finish_scatter_gather_update_accounted " << type << " on " << *this << dendl;
  assert(is_auth());

  for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
       p != dirfrags.end();
       ++p) {
    CDir *dir = p->second;
    // same updateability test the gather used: auth, loaded, not frozen
    if (!dir->is_auth() || dir->get_version() == 0 || dir->is_frozen())
      continue;

    if (type == CEPH_LOCK_IDFT)
      continue;  // nothing to do.

    dout(10) << " journaling updated frag accounted_ on " << *dir << dendl;
    assert(dir->is_projected());  // gather must have projected this fnode
    fnode_t *pf = dir->get_projected_fnode();
    pf->version = dir->pre_dirty();
    mut->add_projected_fnode(dir);
    metablob->add_dir(dir, true);
    mut->auth_pin(dir);

    if (type == CEPH_LOCK_INEST)
      dir->assimilate_dirty_rstat_inodes_finish(mut, metablob);
  }
}
2242 | ||
2243 | // waiting | |
2244 | ||
2245 | bool CInode::is_frozen() const | |
2246 | { | |
2247 | if (is_frozen_inode()) return true; | |
2248 | if (parent && parent->dir->is_frozen()) return true; | |
2249 | return false; | |
2250 | } | |
2251 | ||
2252 | bool CInode::is_frozen_dir() const | |
2253 | { | |
2254 | if (parent && parent->dir->is_frozen_dir()) return true; | |
2255 | return false; | |
2256 | } | |
2257 | ||
2258 | bool CInode::is_freezing() const | |
2259 | { | |
2260 | if (is_freezing_inode()) return true; | |
2261 | if (parent && parent->dir->is_freezing()) return true; | |
2262 | return false; | |
2263 | } | |
2264 | ||
2265 | void CInode::add_dir_waiter(frag_t fg, MDSInternalContextBase *c) | |
2266 | { | |
2267 | if (waiting_on_dir.empty()) | |
2268 | get(PIN_DIRWAITER); | |
2269 | waiting_on_dir[fg].push_back(c); | |
2270 | dout(10) << "add_dir_waiter frag " << fg << " " << c << " on " << *this << dendl; | |
2271 | } | |
2272 | ||
2273 | void CInode::take_dir_waiting(frag_t fg, list<MDSInternalContextBase*>& ls) | |
2274 | { | |
2275 | if (waiting_on_dir.empty()) | |
2276 | return; | |
2277 | ||
2278 | compact_map<frag_t, list<MDSInternalContextBase*> >::iterator p = waiting_on_dir.find(fg); | |
2279 | if (p != waiting_on_dir.end()) { | |
2280 | dout(10) << "take_dir_waiting frag " << fg << " on " << *this << dendl; | |
2281 | ls.splice(ls.end(), p->second); | |
2282 | waiting_on_dir.erase(p); | |
2283 | ||
2284 | if (waiting_on_dir.empty()) | |
2285 | put(PIN_DIRWAITER); | |
2286 | } | |
2287 | } | |
2288 | ||
// Register a waiter for this inode. SINGLEAUTH/UNFREEZE waits are passed up
// to the parent dirfrag unless *this inode itself* is the ambiguous/frozen
// object, so the waiter fires at the level that actually blocks it.
void CInode::add_waiter(uint64_t tag, MDSInternalContextBase *c)
{
  dout(10) << "add_waiter tag " << std::hex << tag << std::dec << " " << c
	   << " !ambig " << !state_test(STATE_AMBIGUOUSAUTH)
	   << " !frozen " << !is_frozen_inode()
	   << " !freezing " << !is_freezing_inode()
	   << dendl;
  // wait on the directory?
  //  make sure its not the inode that is explicitly ambiguous|freezing|frozen
  if (((tag & WAIT_SINGLEAUTH) && !state_test(STATE_AMBIGUOUSAUTH)) ||
      ((tag & WAIT_UNFREEZE) &&
       !is_frozen_inode() && !is_freezing_inode() && !is_frozen_auth_pin())) {
    dout(15) << "passing waiter up tree" << dendl;
    // NOTE(review): assumes parent != NULL on this path — presumably base
    // inodes are never ambiguous/frozen-by-ancestor; confirm with callers.
    parent->dir->add_waiter(tag, c);
    return;
  }
  dout(15) << "taking waiter here" << dendl;
  MDSCacheObject::add_waiter(tag, c);
}
2308 | ||
2309 | void CInode::take_waiting(uint64_t mask, list<MDSInternalContextBase*>& ls) | |
2310 | { | |
2311 | if ((mask & WAIT_DIR) && !waiting_on_dir.empty()) { | |
2312 | // take all dentry waiters | |
2313 | while (!waiting_on_dir.empty()) { | |
2314 | compact_map<frag_t, list<MDSInternalContextBase*> >::iterator p = waiting_on_dir.begin(); | |
2315 | dout(10) << "take_waiting dirfrag " << p->first << " on " << *this << dendl; | |
2316 | ls.splice(ls.end(), p->second); | |
2317 | waiting_on_dir.erase(p); | |
2318 | } | |
2319 | put(PIN_DIRWAITER); | |
2320 | } | |
2321 | ||
2322 | // waiting | |
2323 | MDSCacheObject::take_waiting(mask, ls); | |
2324 | } | |
2325 | ||
// Try to freeze this inode. Returns true and enters FROZEN when auth_pins
// have dropped to the caller's allowance; otherwise enters FREEZING and
// returns false — auth_unpin() completes the transition later.
bool CInode::freeze_inode(int auth_pin_allowance)
{
  assert(auth_pin_allowance > 0); // otherwise we need to adjust parent's nested_auth_pins
  assert(auth_pins >= auth_pin_allowance);
  if (auth_pins > auth_pin_allowance) {
    // too many pins outstanding; remember the target so auth_unpin() can
    // finish the freeze when the count reaches it
    dout(10) << "freeze_inode - waiting for auth_pins to drop to " << auth_pin_allowance << dendl;
    auth_pin_freeze_allowance = auth_pin_allowance;
    get(PIN_FREEZING);
    state_set(STATE_FREEZING);
    return false;
  }

  dout(10) << "freeze_inode - frozen" << dendl;
  assert(auth_pins == auth_pin_allowance);
  if (!state_test(STATE_FROZEN)) {
    get(PIN_FROZEN);
    state_set(STATE_FROZEN);
  }
  return true;
}
2346 | ||
2347 | void CInode::unfreeze_inode(list<MDSInternalContextBase*>& finished) | |
2348 | { | |
2349 | dout(10) << "unfreeze_inode" << dendl; | |
2350 | if (state_test(STATE_FREEZING)) { | |
2351 | state_clear(STATE_FREEZING); | |
2352 | put(PIN_FREEZING); | |
2353 | } else if (state_test(STATE_FROZEN)) { | |
2354 | state_clear(STATE_FROZEN); | |
2355 | put(PIN_FROZEN); | |
2356 | } else | |
2357 | ceph_abort(); | |
2358 | take_waiting(WAIT_UNFREEZE, finished); | |
2359 | } | |
2360 | ||
2361 | void CInode::unfreeze_inode() | |
2362 | { | |
2363 | list<MDSInternalContextBase*> finished; | |
2364 | unfreeze_inode(finished); | |
2365 | mdcache->mds->queue_waiters(finished); | |
2366 | } | |
2367 | ||
2368 | void CInode::freeze_auth_pin() | |
2369 | { | |
2370 | assert(state_test(CInode::STATE_FROZEN)); | |
2371 | state_set(CInode::STATE_FROZENAUTHPIN); | |
2372 | } | |
2373 | ||
2374 | void CInode::unfreeze_auth_pin() | |
2375 | { | |
2376 | assert(state_test(CInode::STATE_FROZENAUTHPIN)); | |
2377 | state_clear(CInode::STATE_FROZENAUTHPIN); | |
2378 | if (!state_test(STATE_FREEZING|STATE_FROZEN)) { | |
2379 | list<MDSInternalContextBase*> finished; | |
2380 | take_waiting(WAIT_UNFREEZE, finished); | |
2381 | mdcache->mds->queue_waiters(finished); | |
2382 | } | |
2383 | } | |
2384 | ||
2385 | void CInode::clear_ambiguous_auth(list<MDSInternalContextBase*>& finished) | |
2386 | { | |
2387 | assert(state_test(CInode::STATE_AMBIGUOUSAUTH)); | |
2388 | state_clear(CInode::STATE_AMBIGUOUSAUTH); | |
2389 | take_waiting(CInode::WAIT_SINGLEAUTH, finished); | |
2390 | } | |
2391 | ||
2392 | void CInode::clear_ambiguous_auth() | |
2393 | { | |
2394 | list<MDSInternalContextBase*> finished; | |
2395 | clear_ambiguous_auth(finished); | |
2396 | mdcache->mds->queue_waiters(finished); | |
2397 | } | |
2398 | ||
2399 | // auth_pins | |
2400 | bool CInode::can_auth_pin() const { | |
2401 | if (!is_auth() || is_freezing_inode() || is_frozen_inode() || is_frozen_auth_pin()) | |
2402 | return false; | |
2403 | if (parent) | |
2404 | return parent->can_auth_pin(); | |
2405 | return true; | |
2406 | } | |
2407 | ||
2408 | void CInode::auth_pin(void *by) | |
2409 | { | |
2410 | if (auth_pins == 0) | |
2411 | get(PIN_AUTHPIN); | |
2412 | auth_pins++; | |
2413 | ||
2414 | #ifdef MDS_AUTHPIN_SET | |
2415 | auth_pin_set.insert(by); | |
2416 | #endif | |
2417 | ||
2418 | dout(10) << "auth_pin by " << by << " on " << *this | |
2419 | << " now " << auth_pins << "+" << nested_auth_pins | |
2420 | << dendl; | |
2421 | ||
2422 | if (parent) | |
2423 | parent->adjust_nested_auth_pins(1, 1, this); | |
2424 | } | |
2425 | ||
// Release one auth pin taken by `by`. If a freeze is pending and the pin
// count has dropped to the recorded allowance, complete the FREEZING ->
// FROZEN transition and fire WAIT_FROZEN waiters.
void CInode::auth_unpin(void *by)
{
  auth_pins--;

#ifdef MDS_AUTHPIN_SET
  assert(auth_pin_set.count(by));
  auth_pin_set.erase(auth_pin_set.find(by));  // erase only ONE entry for `by`
#endif

  if (auth_pins == 0)
    put(PIN_AUTHPIN);

  dout(10) << "auth_unpin by " << by << " on " << *this
	   << " now " << auth_pins << "+" << nested_auth_pins
	   << dendl;

  assert(auth_pins >= 0);

  // mirror the decrement up the parent chain (see auth_pin())
  if (parent)
    parent->adjust_nested_auth_pins(-1, -1, by);

  if (is_freezing_inode() &&
      auth_pins == auth_pin_freeze_allowance) {
    // last blocking pin released: finish the freeze started in freeze_inode()
    dout(10) << "auth_unpin freezing!" << dendl;
    get(PIN_FROZEN);
    put(PIN_FREEZING);
    state_clear(STATE_FREEZING);
    state_set(STATE_FROZEN);
    finish_waiting(WAIT_FROZEN);
  }
}
2457 | ||
// Adjust this inode's nested auth-pin count by `a` (positive or negative),
// optionally auditing it against the dirfrags, and propagate the change up
// through the parent dentry.
void CInode::adjust_nested_auth_pins(int a, void *by)
{
  assert(a);
  nested_auth_pins += a;
  dout(35) << "adjust_nested_auth_pins by " << by
	   << " change " << a << " yields "
	   << auth_pins << "+" << nested_auth_pins << dendl;
  assert(nested_auth_pins >= 0);

  if (g_conf->mds_debug_auth_pins) {
    // audit: nested count should equal the number of non-subtree-root
    // dirfrags that carry any (cumulative) auth pins
    int s = 0;
    for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
	 p != dirfrags.end();
	 ++p) {
      CDir *dir = p->second;
      if (!dir->is_subtree_root() && dir->get_cum_auth_pins())
	s++;
    }
    assert(s == nested_auth_pins);
  }

  // pass 0 for the dentry's own-pin delta: only nested counts change here
  if (parent)
    parent->adjust_nested_auth_pins(a, 0, by);
}
2483 | ||
2484 | ||
2485 | // authority | |
2486 | ||
2487 | mds_authority_t CInode::authority() const | |
2488 | { | |
2489 | if (inode_auth.first >= 0) | |
2490 | return inode_auth; | |
2491 | ||
2492 | if (parent) | |
2493 | return parent->dir->authority(); | |
2494 | ||
2495 | // new items that are not yet linked in (in the committed plane) belong | |
2496 | // to their first parent. | |
2497 | if (!projected_parent.empty()) | |
2498 | return projected_parent.front()->dir->authority(); | |
2499 | ||
2500 | return CDIR_AUTH_UNDEF; | |
2501 | } | |
2502 | ||
2503 | ||
2504 | // SNAP | |
2505 | ||
2506 | snapid_t CInode::get_oldest_snap() | |
2507 | { | |
2508 | snapid_t t = first; | |
2509 | if (!old_inodes.empty()) | |
2510 | t = old_inodes.begin()->second.first; | |
2511 | return MIN(t, oldest_snap); | |
2512 | } | |
2513 | ||
// Copy-on-write the (projected) head inode+xattrs into old_inodes keyed by
// `follows`, then advance `first` past it. cow_head selects whether the
// current or the previous projected state is preserved.
old_inode_t& CInode::cow_old_inode(snapid_t follows, bool cow_head)
{
  assert(follows >= first);

  inode_t *pi = cow_head ? get_projected_inode() : get_previous_projected_inode();
  map<string,bufferptr> *px = cow_head ? get_projected_xattrs() : get_previous_projected_xattrs();

  // snapshot covers [first, follows]
  old_inode_t &old = old_inodes[follows];
  old.first = first;
  old.inode = *pi;
  old.xattrs = *px;

  if (first < oldest_snap)
    oldest_snap = first;

  dout(10) << " " << px->size() << " xattrs cowed, " << *px << dendl;

  // client writeable ranges beyond the snapshot are irrelevant to it
  old.inode.trim_client_ranges(follows);

  if (g_conf->mds_snap_rstat &&
      !(old.inode.rstat == old.inode.accounted_rstat))
    dirty_old_rstats.insert(follows);

  // head now begins after the snapshot
  first = follows+1;

  dout(10) << "cow_old_inode " << (cow_head ? "head" : "previous_head" )
	   << " to [" << old.first << "," << follows << "] on "
	   << *this << dendl;

  return old;
}
2545 | ||
// Split the old_inode interval that spans `snap` into [old.first, snap-1]
// and [snap, p->first], duplicating the state into a new entry at snap-1.
void CInode::split_old_inode(snapid_t snap)
{
  compact_map<snapid_t, old_inode_t>::iterator p = old_inodes.lower_bound(snap);
  assert(p != old_inodes.end() && p->second.first < snap);  // must straddle snap

  // new entry takes the lower half of the interval
  old_inode_t &old = old_inodes[snap - 1];
  old = p->second;

  p->second.first = snap;
  dout(10) << "split_old_inode " << "[" << old.first << "," << p->first
	   << "] to [" << snap << "," << p->first << "] on " << *this << dendl;
}
2558 | ||
2559 | void CInode::pre_cow_old_inode() | |
2560 | { | |
2561 | snapid_t follows = find_snaprealm()->get_newest_seq(); | |
2562 | if (first <= follows) | |
2563 | cow_old_inode(follows, true); | |
2564 | } | |
2565 | ||
// Drop old_inode entries whose interval [second.first, first] contains no
// surviving snapid from `snaps`.
void CInode::purge_stale_snap_data(const set<snapid_t>& snaps)
{
  dout(10) << "purge_stale_snap_data " << snaps << dendl;

  if (old_inodes.empty())
    return;

  compact_map<snapid_t,old_inode_t>::iterator p = old_inodes.begin();
  while (p != old_inodes.end()) {
    // first surviving snap at or above the interval's lower bound
    set<snapid_t>::const_iterator q = snaps.lower_bound(p->second.first);
    if (q == snaps.end() || *q > p->first) {
      // no live snap falls inside this interval; discard it
      dout(10) << " purging old_inode [" << p->second.first << "," << p->first << "]" << dendl;
      old_inodes.erase(p++);  // post-increment keeps the iterator valid
    } else
      ++p;
  }
}
2583 | ||
2584 | /* | |
2585 | * pick/create an old_inode | |
2586 | */ | |
2587 | old_inode_t * CInode::pick_old_inode(snapid_t snap) | |
2588 | { | |
2589 | compact_map<snapid_t, old_inode_t>::iterator p = old_inodes.lower_bound(snap); // p is first key >= to snap | |
2590 | if (p != old_inodes.end() && p->second.first <= snap) { | |
2591 | dout(10) << "pick_old_inode snap " << snap << " -> [" << p->second.first << "," << p->first << "]" << dendl; | |
2592 | return &p->second; | |
2593 | } | |
2594 | dout(10) << "pick_old_inode snap " << snap << " -> nothing" << dendl; | |
2595 | return NULL; | |
2596 | } | |
2597 | ||
// Lazily create this inode's SnapRealm, linking it under the nearest
// ancestor realm; unless nosplit, migrate descendants from the parent
// realm into the new one.
void CInode::open_snaprealm(bool nosplit)
{
  if (!snaprealm) {
    SnapRealm *parent = find_snaprealm();
    snaprealm = new SnapRealm(mdcache, this);
    if (parent) {
      dout(10) << "open_snaprealm " << snaprealm
	       << " parent is " << parent
	       << dendl;
      dout(30) << " siblings are " << parent->open_children << dendl;
      snaprealm->parent = parent;
      if (!nosplit)
	parent->split_at(snaprealm);  // move our subtree's members over
      parent->open_children.insert(snaprealm);
    }
  }
}
// Tear down this inode's SnapRealm, detaching it from its parent realm.
// NOTE(review): `nojoin` is currently unused — the join path below is
// commented out; confirm before removing the parameter.
void CInode::close_snaprealm(bool nojoin)
{
  if (snaprealm) {
    dout(15) << "close_snaprealm " << *snaprealm << dendl;
    snaprealm->close_parents();
    if (snaprealm->parent) {
      snaprealm->parent->open_children.erase(snaprealm);
      //if (!nojoin)
      //snaprealm->parent->join(snaprealm);
    }
    delete snaprealm;
    snaprealm = 0;
  }
}
2629 | ||
2630 | SnapRealm *CInode::find_snaprealm() const | |
2631 | { | |
2632 | const CInode *cur = this; | |
2633 | while (!cur->snaprealm) { | |
2634 | if (cur->get_parent_dn()) | |
2635 | cur = cur->get_parent_dn()->get_dir()->get_inode(); | |
2636 | else if (get_projected_parent_dn()) | |
2637 | cur = cur->get_projected_parent_dn()->get_dir()->get_inode(); | |
2638 | else | |
2639 | break; | |
2640 | } | |
2641 | return cur->snaprealm; | |
2642 | } | |
2643 | ||
2644 | void CInode::encode_snap_blob(bufferlist &snapbl) | |
2645 | { | |
2646 | if (snaprealm) { | |
2647 | ::encode(snaprealm->srnode, snapbl); | |
2648 | dout(20) << "encode_snap_blob " << *snaprealm << dendl; | |
2649 | } | |
2650 | } | |
2651 | void CInode::decode_snap_blob(bufferlist& snapbl) | |
2652 | { | |
2653 | if (snapbl.length()) { | |
2654 | open_snaprealm(); | |
2655 | bufferlist::iterator p = snapbl.begin(); | |
2656 | ::decode(snaprealm->srnode, p); | |
2657 | if (is_base()) { | |
2658 | bool ok = snaprealm->_open_parents(NULL); | |
2659 | assert(ok); | |
2660 | } | |
2661 | dout(20) << "decode_snap_blob " << *snaprealm << dendl; | |
2662 | } | |
2663 | } | |
2664 | ||
2665 | void CInode::encode_snap(bufferlist& bl) | |
2666 | { | |
2667 | bufferlist snapbl; | |
2668 | encode_snap_blob(snapbl); | |
2669 | ::encode(snapbl, bl); | |
2670 | ::encode(oldest_snap, bl); | |
2671 | } | |
2672 | ||
2673 | void CInode::decode_snap(bufferlist::iterator& p) | |
2674 | { | |
2675 | bufferlist snapbl; | |
2676 | ::decode(snapbl, p); | |
2677 | ::decode(oldest_snap, p); | |
2678 | decode_snap_blob(snapbl); | |
2679 | } | |
2680 | ||
2681 | // ============================================= | |
2682 | ||
// =============================================

// Compute which client (if any) could be granted exclusive "loner" caps:
// exactly one non-stale client wanting RD/WR-class caps (or any client on a
// dir with no subtree-root dirfrag). Returns -1 when no unique candidate,
// other MDSs want caps, or the cache is read-only.
client_t CInode::calc_ideal_loner()
{
  if (mdcache->is_readonly())
    return -1;
  if (!mds_caps_wanted.empty())
    return -1;   // another MDS wants caps; can't give one client exclusivity

  int n = 0;
  client_t loner = -1;
  for (map<client_t,Capability*>::iterator it = client_caps.begin();
       it != client_caps.end();
       ++it)
    if (!it->second->is_stale() &&
	((it->second->wanted() & (CEPH_CAP_ANY_WR|CEPH_CAP_FILE_WR|CEPH_CAP_FILE_RD)) ||
	 (inode.is_dir() && !has_subtree_root_dirfrag()))) {
      if (n)
	return -1;   // second candidate found => no loner
      n++;
      loner = it->first;
    }
  return loner;
}
2705 | ||
2706 | client_t CInode::choose_ideal_loner() | |
2707 | { | |
2708 | want_loner_cap = calc_ideal_loner(); | |
2709 | return want_loner_cap; | |
2710 | } | |
2711 | ||
2712 | bool CInode::try_set_loner() | |
2713 | { | |
2714 | assert(want_loner_cap >= 0); | |
2715 | if (loner_cap >= 0 && loner_cap != want_loner_cap) | |
2716 | return false; | |
2717 | set_loner_cap(want_loner_cap); | |
2718 | return true; | |
2719 | } | |
2720 | ||
2721 | void CInode::set_loner_cap(client_t l) | |
2722 | { | |
2723 | loner_cap = l; | |
2724 | authlock.set_excl_client(loner_cap); | |
2725 | filelock.set_excl_client(loner_cap); | |
2726 | linklock.set_excl_client(loner_cap); | |
2727 | xattrlock.set_excl_client(loner_cap); | |
2728 | } | |
2729 | ||
// Try to clear the current loner.  Succeeds (returns true) when there is
// no loner, the loner no longer has a cap, or the loner's issued caps are
// all within what any client is allowed anyway -- i.e. dropping loner
// status would revoke nothing.
bool CInode::try_drop_loner()
{
  if (loner_cap < 0)
    return true;

  int other_allowed = get_caps_allowed_by_type(CAP_ANY);
  Capability *cap = get_client_cap(loner_cap);
  if (!cap ||
      (cap->issued() & ~other_allowed) == 0) {
    set_loner_cap(-1);
    return true;
  }
  return false;
}
2744 | ||
2745 | ||
// Choose a new state for one lock during recovery, based on the caps
// already issued to clients (allissued is the full unshifted cap word;
// the lock's own bits are extracted via its shift/mask).
void CInode::choose_lock_state(SimpleLock *lock, int allissued)
{
  int shift = lock->get_cap_shift();
  int issued = (allissued >> shift) & lock->get_cap_mask();
  if (is_auth()) {
    if (lock->is_xlocked()) {
      // do nothing here
    } else if (lock->get_state() != LOCK_MIX) {
      // pick the weakest state compatible with what clients already hold
      if (issued & (CEPH_CAP_GEXCL | CEPH_CAP_GBUFFER))
	lock->set_state(LOCK_EXCL);
      else if (issued & CEPH_CAP_GWR)
	lock->set_state(LOCK_MIX);
      else if (lock->is_dirty()) {
	if (is_replicated())
	  lock->set_state(LOCK_MIX);
	else
	  lock->set_state(LOCK_LOCK);
      } else
	lock->set_state(LOCK_SYNC);
    }
  } else {
    // our states have already been chosen during rejoin.
    if (lock->is_xlocked())
      assert(lock->get_state() == LOCK_LOCK);
  }
}
2773 | ||
// Choose states for all cap-related locks after recovery, based on the
// caps issued plus the dirty caps reported by reconnecting clients.  If a
// client holds exclusive/writable caps, try to (re)establish it as loner
// first so the per-lock choices see the right exclusivity.
void CInode::choose_lock_states(int dirty_caps)
{
  int issued = get_caps_issued() | dirty_caps;
  if (is_auth() && (issued & (CEPH_CAP_ANY_EXCL|CEPH_CAP_ANY_WR)) &&
      choose_ideal_loner() >= 0)
    try_set_loner();
  choose_lock_state(&filelock, issued);
  choose_lock_state(&nestlock, issued);
  choose_lock_state(&dirfragtreelock, issued);
  choose_lock_state(&authlock, issued);
  choose_lock_state(&xattrlock, issued);
  choose_lock_state(&linklock, issued);
}
2787 | ||
2788 | Capability *CInode::add_client_cap(client_t client, Session *session, SnapRealm *conrealm) | |
2789 | { | |
2790 | if (client_caps.empty()) { | |
2791 | get(PIN_CAPS); | |
2792 | if (conrealm) | |
2793 | containing_realm = conrealm; | |
2794 | else | |
2795 | containing_realm = find_snaprealm(); | |
2796 | containing_realm->inodes_with_caps.push_back(&item_caps); | |
2797 | dout(10) << "add_client_cap first cap, joining realm " << *containing_realm << dendl; | |
2798 | } | |
2799 | ||
2800 | if (client_caps.empty()) | |
2801 | mdcache->num_inodes_with_caps++; | |
2802 | ||
2803 | Capability *cap = new Capability(this, ++mdcache->last_cap_id, client); | |
2804 | assert(client_caps.count(client) == 0); | |
2805 | client_caps[client] = cap; | |
2806 | ||
2807 | session->add_cap(cap); | |
2808 | if (session->is_stale()) | |
2809 | cap->mark_stale(); | |
2810 | ||
2811 | cap->client_follows = first-1; | |
2812 | ||
2813 | containing_realm->add_cap(client, cap); | |
2814 | ||
2815 | return cap; | |
2816 | } | |
2817 | ||
// Remove (and delete) a client's capability on this inode: detach it from
// the session, revoking lists and snaprealm, clear the loner if it was
// this client, and on the last cap unpin the inode and leave the realm.
// Also drops the client's advisory file locks and wakes WAIT_FLOCK
// waiters if any were removed.
void CInode::remove_client_cap(client_t client)
{
  assert(client_caps.count(client) == 1);
  Capability *cap = client_caps[client];

  // unlink from all intrusive lists before deletion
  cap->item_session_caps.remove_myself();
  cap->item_revoking_caps.remove_myself();
  cap->item_client_revoking_caps.remove_myself();
  containing_realm->remove_cap(client, cap);

  if (client == loner_cap)
    loner_cap = -1;

  delete cap;
  client_caps.erase(client);
  if (client_caps.empty()) {
    dout(10) << "remove_client_cap last cap, leaving realm " << *containing_realm << dendl;
    put(PIN_CAPS);
    item_caps.remove_myself();
    containing_realm = NULL;
    item_open_file.remove_myself();  // unpin logsegment
    mdcache->num_inodes_with_caps--;
  }

  //clean up advisory locks
  bool fcntl_removed = fcntl_locks ? fcntl_locks->remove_all_from(client) : false;
  bool flock_removed = flock_locks ? flock_locks->remove_all_from(client) : false;
  if (fcntl_removed || flock_removed) {
    list<MDSInternalContextBase*> waiters;
    take_waiting(CInode::WAIT_FLOCK, waiters);
    mdcache->mds->queue_waiters(waiters);
  }
}
2851 | ||
2852 | void CInode::move_to_realm(SnapRealm *realm) | |
2853 | { | |
2854 | dout(10) << "move_to_realm joining realm " << *realm | |
2855 | << ", leaving realm " << *containing_realm << dendl; | |
2856 | for (map<client_t,Capability*>::iterator q = client_caps.begin(); | |
2857 | q != client_caps.end(); | |
2858 | ++q) { | |
2859 | containing_realm->remove_cap(q->first, q->second); | |
2860 | realm->add_cap(q->first, q->second); | |
2861 | } | |
2862 | item_caps.remove_myself(); | |
2863 | realm->inodes_with_caps.push_back(&item_caps); | |
2864 | containing_realm = realm; | |
2865 | } | |
2866 | ||
// Re-establish a client capability during MDS reconnect from the client's
// reported state (icr).  If a cap already exists its wanted/issued bits
// are merged in; otherwise a fresh cap is created and primed with the
// client's cap id / wanted / issued values.
Capability *CInode::reconnect_cap(client_t client, const cap_reconnect_t& icr, Session *session)
{
  Capability *cap = get_client_cap(client);
  if (cap) {
    // FIXME?
    cap->merge(icr.capinfo.wanted, icr.capinfo.issued);
  } else {
    cap = add_client_cap(client, session);
    cap->set_cap_id(icr.capinfo.cap_id);
    cap->set_wanted(icr.capinfo.wanted);
    cap->issue_norevoke(icr.capinfo.issued);
    cap->reset_seq();
  }
  cap->set_last_issue_stamp(ceph_clock_now());
  return cap;
}
2883 | ||
// After exporting caps to another MDS rank, drop all local client caps
// and reset loner/wanted state; this rank no longer tracks them.
void CInode::clear_client_caps_after_export()
{
  while (!client_caps.empty())
    remove_client_cap(client_caps.begin()->first);
  loner_cap = -1;
  want_loner_cap = -1;
  mds_caps_wanted.clear();
}
2892 | ||
2893 | void CInode::export_client_caps(map<client_t,Capability::Export>& cl) | |
2894 | { | |
2895 | for (map<client_t,Capability*>::iterator it = client_caps.begin(); | |
2896 | it != client_caps.end(); | |
2897 | ++it) { | |
2898 | cl[it->first] = it->second->make_export(); | |
2899 | } | |
2900 | } | |
2901 | ||
2902 | // caps allowed | |
2903 | int CInode::get_caps_liked() const | |
2904 | { | |
2905 | if (is_dir()) | |
2906 | return CEPH_CAP_PIN | CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_SHARED; // but not, say, FILE_RD|WR|WRBUFFER | |
2907 | else | |
2908 | return CEPH_CAP_ANY & ~CEPH_CAP_FILE_LAZYIO; | |
2909 | } | |
2910 | ||
// The superset of caps that could ever be issued on this inode: the caps
// liked by this inode type, masked by what each cap-bearing lock could
// ever allow (each shifted into that lock's cap field).
int CInode::get_caps_allowed_ever() const
{
  int allowed;
  if (is_dir())
    allowed = CEPH_CAP_PIN | CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_SHARED;
  else
    allowed = CEPH_CAP_ANY;
  return allowed &
    (CEPH_CAP_PIN |
     (filelock.gcaps_allowed_ever() << filelock.get_cap_shift()) |
     (authlock.gcaps_allowed_ever() << authlock.get_cap_shift()) |
     (xattrlock.gcaps_allowed_ever() << xattrlock.get_cap_shift()) |
     (linklock.gcaps_allowed_ever() << linklock.get_cap_shift()));
}
2925 | ||
// Caps currently allowed to a holder of the given type (CAP_LONER,
// CAP_XLOCKER or CAP_ANY), assembled from each cap-bearing lock's
// current state.
int CInode::get_caps_allowed_by_type(int type) const
{
  return
    CEPH_CAP_PIN |
    (filelock.gcaps_allowed(type) << filelock.get_cap_shift()) |
    (authlock.gcaps_allowed(type) << authlock.get_cap_shift()) |
    (xattrlock.gcaps_allowed(type) << xattrlock.get_cap_shift()) |
    (linklock.gcaps_allowed(type) << linklock.get_cap_shift());
}
2935 | ||
// Union of each lock's "careful" cap bits, shifted into the full cap word.
int CInode::get_caps_careful() const
{
  return
    (filelock.gcaps_careful() << filelock.get_cap_shift()) |
    (authlock.gcaps_careful() << authlock.get_cap_shift()) |
    (xattrlock.gcaps_careful() << xattrlock.get_cap_shift()) |
    (linklock.gcaps_careful() << linklock.get_cap_shift());
}
2944 | ||
// Cap bits the given client may hold by virtue of xlocking the
// corresponding locks, assembled across all cap-bearing locks.
int CInode::get_xlocker_mask(client_t client) const
{
  return
    (filelock.gcaps_xlocker_mask(client) << filelock.get_cap_shift()) |
    (authlock.gcaps_xlocker_mask(client) << authlock.get_cap_shift()) |
    (xattrlock.gcaps_xlocker_mask(client) << xattrlock.get_cap_shift()) |
    (linklock.gcaps_xlocker_mask(client) << linklock.get_cap_shift());
}
2953 | ||
// Caps this particular session may be issued.  The loner additionally
// gets loner caps and (for locks it has xlocked) xlocker caps.  FILE_RD
// and FILE_WR are withheld from clients that lack the features required
// to handle this inode's inline data or namespaced file layout.
int CInode::get_caps_allowed_for_client(Session *session, inode_t *file_i) const
{
  client_t client = session->info.inst.name.num();
  int allowed;
  if (client == get_loner()) {
    // as the loner, we get the loner_caps AND any xlocker_caps for things we have xlocked
    allowed =
      get_caps_allowed_by_type(CAP_LONER) |
      (get_caps_allowed_by_type(CAP_XLOCKER) & get_xlocker_mask(client));
  } else {
    allowed = get_caps_allowed_by_type(CAP_ANY);
  }

  if (!is_dir()) {
    if ((file_i->inline_data.version != CEPH_INLINE_NONE &&
	 !session->connection->has_feature(CEPH_FEATURE_MDS_INLINE_DATA)) ||
	(!file_i->layout.pool_ns.empty() &&
	 !session->connection->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2)))
      allowed &= ~(CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR);
  }
  return allowed;
}
2976 | ||
2977 | // caps issued, wanted | |
// Union of caps issued to all clients, shifted/masked into one lock's cap
// field.  Optionally splits the result into loner / non-loner / xlocker
// components via the out-pointers.  NOTE: not const -- on non-auth
// replicas this also clears loner_cap, since only the auth tracks a loner.
int CInode::get_caps_issued(int *ploner, int *pother, int *pxlocker,
			    int shift, int mask)
{
  int c = 0;
  int loner = 0, other = 0, xlocker = 0;
  if (!is_auth()) {
    loner_cap = -1;
  }

  for (map<client_t,Capability*>::const_iterator it = client_caps.begin();
       it != client_caps.end();
       ++it) {
    int i = it->second->issued();
    c |= i;
    if (it->first == loner_cap)
      loner |= i;
    else
      other |= i;
    xlocker |= get_xlocker_mask(it->first) & i;
  }
  if (ploner) *ploner = (loner >> shift) & mask;
  if (pother) *pother = (other >> shift) & mask;
  if (pxlocker) *pxlocker = (xlocker >> shift) & mask;
  return (c >> shift) & mask;
}
3003 | ||
3004 | bool CInode::is_any_caps_wanted() const | |
3005 | { | |
3006 | for (map<client_t,Capability*>::const_iterator it = client_caps.begin(); | |
3007 | it != client_caps.end(); | |
3008 | ++it) | |
3009 | if (it->second->wanted()) | |
3010 | return true; | |
3011 | return false; | |
3012 | } | |
3013 | ||
// Union of caps wanted by all non-stale client caps (plus, on the auth,
// caps wanted by other MDS ranks), shifted/masked into one lock's cap
// field.  Optionally split into loner / non-loner components; MDS-wanted
// caps always count as "other".
int CInode::get_caps_wanted(int *ploner, int *pother, int shift, int mask) const
{
  int w = 0;
  int loner = 0, other = 0;
  for (map<client_t,Capability*>::const_iterator it = client_caps.begin();
       it != client_caps.end();
       ++it) {
    if (!it->second->is_stale()) {
      int t = it->second->wanted();
      w |= t;
      if (it->first == loner_cap)
	loner |= t;
      else
	other |= t;
    }
    //cout << " get_caps_wanted client " << it->first << " " << cap_string(it->second.wanted()) << endl;
  }
  if (is_auth())
    for (compact_map<int,int>::const_iterator it = mds_caps_wanted.begin();
	 it != mds_caps_wanted.end();
	 ++it) {
      w |= it->second;
      other |= it->second;
      //cout << " get_caps_wanted mds " << it->first << " " << cap_string(it->second) << endl;
    }
  if (ploner) *ploner = (loner >> shift) & mask;
  if (pother) *pother = (other >> shift) & mask;
  return (w >> shift) & mask;
}
3043 | ||
3044 | bool CInode::issued_caps_need_gather(SimpleLock *lock) | |
3045 | { | |
3046 | int loner_issued, other_issued, xlocker_issued; | |
3047 | get_caps_issued(&loner_issued, &other_issued, &xlocker_issued, | |
3048 | lock->get_cap_shift(), lock->get_cap_mask()); | |
3049 | if ((loner_issued & ~lock->gcaps_allowed(CAP_LONER)) || | |
3050 | (other_issued & ~lock->gcaps_allowed(CAP_ANY)) || | |
3051 | (xlocker_issued & ~lock->gcaps_allowed(CAP_XLOCKER))) | |
3052 | return true; | |
3053 | return false; | |
3054 | } | |
3055 | ||
// Relax all lock states before replicating this (previously unreplicated,
// auth) inode to another MDS rank.
void CInode::replicate_relax_locks()
{
  //dout(10) << " relaxing locks on " << *this << dendl;
  assert(is_auth());
  assert(!is_replicated());

  authlock.replicate_relax();
  linklock.replicate_relax();
  dirfragtreelock.replicate_relax();
  filelock.replicate_relax();
  xattrlock.replicate_relax();
  snaplock.replicate_relax();
  nestlock.replicate_relax();
  flocklock.replicate_relax();
  policylock.replicate_relax();
}
3072 | ||
3073 | ||
3074 | ||
3075 | // ============================================= | |
3076 | ||
// Encode this inode's stat + cap grant for a client reply (the encoding
// must match MClientReply::InodeStat on the client side).
//
//  bl           output buffer
//  session      target client session (feature bits gate optional fields)
//  dir_realm    snaprealm of the containing dir, or NULL; a mismatch with
//               our realm suppresses cap issue
//  snapid       CEPH_NOSNAP for the live inode, else a snapshot view
//  max_bytes    if nonzero, fail with -ENOSPC when the estimated encoded
//               size would exceed it
//  getattr_caps caps the client asked about in a getattr, used to decide
//               whether to include inline data / xattrs
//
// Returns `valid` (nonzero when the encoded view is authoritative).
// Side effects: may create a new client cap and issue/adjust caps.
int CInode::encode_inodestat(bufferlist& bl, Session *session,
			     SnapRealm *dir_realm,
			     snapid_t snapid,
			     unsigned max_bytes,
			     int getattr_caps)
{
  client_t client = session->info.inst.name.num();
  assert(snapid);
  assert(session->connection);

  bool valid = true;

  // pick a version!
  inode_t *oi = &inode;
  inode_t *pi = get_projected_inode();

  map<string, bufferptr> *pxattrs = 0;

  if (snapid != CEPH_NOSNAP) {

    // for now at least, old_inodes is only defined/valid on the auth
    if (!is_auth())
      valid = false;

    if (is_multiversion()) {
      // find the old_inode interval covering snapid, if any
      compact_map<snapid_t,old_inode_t>::iterator p = old_inodes.lower_bound(snapid);
      if (p != old_inodes.end()) {
	if (p->second.first > snapid) {
	  if (p != old_inodes.begin())
	    --p;
	}
	if (p->second.first <= snapid && snapid <= p->first) {
	  dout(15) << "encode_inodestat snapid " << snapid
		   << " to old_inode [" << p->second.first << "," << p->first << "]"
		   << " " << p->second.inode.rstat
		   << dendl;
	  pi = oi = &p->second.inode;
	  pxattrs = &p->second.xattrs;
	} else {
	  // snapshoted remote dentry can result this
	  dout(0) << "encode_inodestat old_inode for snapid " << snapid
		  << " not found" << dendl;
	}
      }
    } else if (snapid < first || snapid > last) {
      // snapshoted remote dentry can result this
      dout(0) << "encode_inodestat [" << first << "," << last << "]"
	      << " not match snapid " << snapid << dendl;
    }
  }

  SnapRealm *realm = find_snaprealm();

  // conditions under which we must not issue any caps with this reply
  bool no_caps = !valid ||
		 session->is_stale() ||
		 (dir_realm && realm != dir_realm) ||
		 is_frozen() ||
		 state_test(CInode::STATE_EXPORTINGCAPS);
  if (no_caps)
    dout(20) << "encode_inodestat no caps"
	     << (!valid?", !valid":"")
	     << (session->is_stale()?", session stale ":"")
	     << ((dir_realm && realm != dir_realm)?", snaprealm differs ":"")
	     << (is_frozen()?", frozen inode":"")
	     << (state_test(CInode::STATE_EXPORTINGCAPS)?", exporting caps":"")
	     << dendl;


  // "fake" a version that is old (stable) version, +1 if projected.
  version_t version = (oi->version * 2) + is_projected();

  Capability *cap = get_client_cap(client);
  // per-section: use the projected inode if this client can see projected
  // state (it xlocked the lock, or is the loner)
  bool pfile = filelock.is_xlocked_by_client(client) || get_loner() == client;
  //(cap && (cap->issued() & CEPH_CAP_FILE_EXCL));
  bool pauth = authlock.is_xlocked_by_client(client) || get_loner() == client;
  bool plink = linklock.is_xlocked_by_client(client) || get_loner() == client;
  bool pxattr = xattrlock.is_xlocked_by_client(client) || get_loner() == client;

  bool plocal = versionlock.get_last_wrlock_client() == client;
  bool ppolicy = policylock.is_xlocked_by_client(client) || get_loner()==client;

  inode_t *any_i = (pfile|pauth|plink|pxattr|plocal) ? pi : oi;

  dout(20) << " pfile " << pfile << " pauth " << pauth
	   << " plink " << plink << " pxattr " << pxattr
	   << " plocal " << plocal
	   << " ctime " << any_i->ctime
	   << " valid=" << valid << dendl;

  // file
  inode_t *file_i = pfile ? pi:oi;
  file_layout_t layout;
  if (is_dir()) {
    layout = (ppolicy ? pi : oi)->layout;
  } else {
    layout = file_i->layout;
  }

  // max_size is min of projected, actual
  uint64_t max_size =
    MIN(oi->client_ranges.count(client) ?
	oi->client_ranges[client].range.last : 0,
	pi->client_ranges.count(client) ?
	pi->client_ranges[client].range.last : 0);

  // inline data
  version_t inline_version = 0;
  bufferlist inline_data;
  if (file_i->inline_data.version == CEPH_INLINE_NONE) {
    inline_version = CEPH_INLINE_NONE;
  } else if ((!cap && !no_caps) ||
	     (cap && cap->client_inline_version < file_i->inline_data.version) ||
	     (getattr_caps & CEPH_CAP_FILE_RD)) { // client requests inline data
    inline_version = file_i->inline_data.version;
    if (file_i->inline_data.length() > 0)
      inline_data = file_i->inline_data.get_data();
  }

  // nest (do same as file... :/)
  if (cap) {
    cap->last_rbytes = file_i->rstat.rbytes;
    cap->last_rsize = file_i->rstat.rsize();
  }

  // auth
  inode_t *auth_i = pauth ? pi:oi;

  // link
  inode_t *link_i = plink ? pi:oi;

  // xattr
  inode_t *xattr_i = pxattr ? pi:oi;

  // xattr
  bufferlist xbl;
  version_t xattr_version;
  if ((!cap && !no_caps) ||
      (cap && cap->client_xattr_version < xattr_i->xattr_version) ||
      (getattr_caps & CEPH_CAP_XATTR_SHARED)) { // client requests xattrs
    if (!pxattrs)
      pxattrs = pxattr ? get_projected_xattrs() : &xattrs;
    ::encode(*pxattrs, xbl);
    xattr_version = xattr_i->xattr_version;
  } else {
    xattr_version = 0;
  }

  // do we have room?
  if (max_bytes) {
    // hand-computed upper bound on the encoded InodeStat size below
    unsigned bytes = 8 + 8 + 4 + 8 + 8 + sizeof(ceph_mds_reply_cap) +
      sizeof(struct ceph_file_layout) + 4 + layout.pool_ns.size() +
      sizeof(struct ceph_timespec) * 3 +
      4 + 8 + 8 + 8 + 4 + 4 + 4 + 4 + 4 +
      8 + 8 + 8 + 8 + 8 + sizeof(struct ceph_timespec) +
      4;
    bytes += sizeof(__u32);
    bytes += (sizeof(__u32) + sizeof(__u32)) * dirfragtree._splits.size();
    bytes += sizeof(__u32) + symlink.length();
    bytes += sizeof(__u32) + xbl.length();
    bytes += sizeof(version_t) + sizeof(__u32) + inline_data.length();
    if (bytes > max_bytes)
      return -ENOSPC;
  }


  // encode caps
  struct ceph_mds_reply_cap ecap;
  if (snapid != CEPH_NOSNAP) {
    /*
     * snapped inodes (files or dirs) only get read-only caps.  always
     * issue everything possible, since it is read only.
     *
     * if a snapped inode has caps, limit issued caps based on the
     * lock state.
     *
     * if it is a live inode, limit issued caps based on the lock
     * state.
     *
     * do NOT adjust cap issued state, because the client always
     * tracks caps per-snap and the mds does either per-interval or
     * multiversion.
     */
    ecap.caps = valid ? get_caps_allowed_by_type(CAP_ANY) : CEPH_STAT_CAP_INODE;
    if (last == CEPH_NOSNAP || is_any_caps())
      ecap.caps = ecap.caps & get_caps_allowed_for_client(session, file_i);
    ecap.seq = 0;
    ecap.mseq = 0;
    ecap.realm = 0;
  } else {
    if (!no_caps && !cap) {
      // add a new cap
      cap = add_client_cap(client, session, realm);
      if (is_auth()) {
	if (choose_ideal_loner() >= 0)
	  try_set_loner();
	else if (get_wanted_loner() < 0)
	  try_drop_loner();
      }
    }

    int issue = 0;
    if (!no_caps && cap) {
      int likes = get_caps_liked();
      int allowed = get_caps_allowed_for_client(session, file_i);
      issue = (cap->wanted() | likes) & allowed;
      cap->issue_norevoke(issue);
      issue = cap->pending();
      dout(10) << "encode_inodestat issuing " << ccap_string(issue)
	       << " seq " << cap->get_last_seq() << dendl;
    } else if (cap && cap->is_new() && !dir_realm) {
      // alway issue new caps to client, otherwise the caps get lost
      assert(cap->is_stale());
      issue = cap->pending() | CEPH_CAP_PIN;
      cap->issue_norevoke(issue);
      dout(10) << "encode_inodestat issuing " << ccap_string(issue)
	       << " seq " << cap->get_last_seq()
	       << "(stale|new caps)" << dendl;
    }

    if (issue) {
      cap->set_last_issue();
      cap->set_last_issue_stamp(ceph_clock_now());
      cap->clear_new();
      ecap.caps = issue;
      ecap.wanted = cap->wanted();
      ecap.cap_id = cap->get_cap_id();
      ecap.seq = cap->get_last_seq();
      ecap.mseq = cap->get_mseq();
      ecap.realm = realm->inode->ino();
    } else {
      ecap.cap_id = 0;
      ecap.caps = 0;
      ecap.seq = 0;
      ecap.mseq = 0;
      ecap.realm = 0;
      ecap.wanted = 0;
    }
  }
  ecap.flags = is_auth() ? CEPH_CAP_FLAG_AUTH : 0;
  dout(10) << "encode_inodestat caps " << ccap_string(ecap.caps)
	   << " seq " << ecap.seq << " mseq " << ecap.mseq
	   << " xattrv " << xattr_version << " len " << xbl.length()
	   << dendl;

  // include inline data only if the client will hold FILE_SHARED
  if (inline_data.length() && cap) {
    if ((cap->pending() | getattr_caps) & CEPH_CAP_FILE_SHARED) {
      dout(10) << "including inline version " << inline_version << dendl;
      cap->client_inline_version = inline_version;
    } else {
      dout(10) << "dropping inline version " << inline_version << dendl;
      inline_version = 0;
      inline_data.clear();
    }
  }

  // include those xattrs?
  if (xbl.length() && cap) {
    if ((cap->pending() | getattr_caps) & CEPH_CAP_XATTR_SHARED) {
      dout(10) << "including xattrs version " << xattr_i->xattr_version << dendl;
      cap->client_xattr_version = xattr_i->xattr_version;
    } else {
      dout(10) << "dropping xattrs version " << xattr_i->xattr_version << dendl;
      xbl.clear(); // no xattrs .. XXX what's this about?!?
      xattr_version = 0;
    }
  }

  /*
   * note: encoding matches MClientReply::InodeStat
   */
  ::encode(oi->ino, bl);
  ::encode(snapid, bl);
  ::encode(oi->rdev, bl);
  ::encode(version, bl);

  ::encode(xattr_version, bl);

  ::encode(ecap, bl);
  {
    ceph_file_layout legacy_layout;
    layout.to_legacy(&legacy_layout);
    ::encode(legacy_layout, bl);
  }
  ::encode(any_i->ctime, bl);
  ::encode(file_i->mtime, bl);
  ::encode(file_i->atime, bl);
  ::encode(file_i->time_warp_seq, bl);
  ::encode(file_i->size, bl);
  ::encode(max_size, bl);
  ::encode(file_i->truncate_size, bl);
  ::encode(file_i->truncate_seq, bl);

  ::encode(auth_i->mode, bl);
  ::encode((uint32_t)auth_i->uid, bl);
  ::encode((uint32_t)auth_i->gid, bl);

  ::encode(link_i->nlink, bl);

  ::encode(file_i->dirstat.nfiles, bl);
  ::encode(file_i->dirstat.nsubdirs, bl);
  ::encode(file_i->rstat.rbytes, bl);
  ::encode(file_i->rstat.rfiles, bl);
  ::encode(file_i->rstat.rsubdirs, bl);
  ::encode(file_i->rstat.rctime, bl);

  dirfragtree.encode(bl);

  ::encode(symlink, bl);
  // feature-gated trailing fields; order must match the client decoder
  if (session->connection->has_feature(CEPH_FEATURE_DIRLAYOUTHASH)) {
    ::encode(file_i->dir_layout, bl);
  }
  ::encode(xbl, bl);
  if (session->connection->has_feature(CEPH_FEATURE_MDS_INLINE_DATA)) {
    ::encode(inline_version, bl);
    ::encode(inline_data, bl);
  }
  if (session->connection->has_feature(CEPH_FEATURE_MDS_QUOTA)) {
    inode_t *policy_i = ppolicy ? pi : oi;
    ::encode(policy_i->quota, bl);
  }
  if (session->connection->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2)) {
    ::encode(layout.pool_ns, bl);
  }
  if (session->connection->has_feature(CEPH_FEATURE_FS_BTIME)) {
    ::encode(any_i->btime, bl);
    ::encode(any_i->change_attr, bl);
  }

  return valid;
}
3407 | ||
// Fill in a cap message (MClientCaps) with this inode's metadata for the
// cap's client.  For each metadata section, the projected inode is used
// when the client can see projected state (it xlocked the relevant lock,
// or holds FILE_EXCL for the file section).
void CInode::encode_cap_message(MClientCaps *m, Capability *cap)
{
  assert(cap);

  client_t client = cap->get_client();

  bool pfile = filelock.is_xlocked_by_client(client) || (cap->issued() & CEPH_CAP_FILE_EXCL);
  bool pauth = authlock.is_xlocked_by_client(client);
  bool plink = linklock.is_xlocked_by_client(client);
  bool pxattr = xattrlock.is_xlocked_by_client(client);

  inode_t *oi = &inode;
  inode_t *pi = get_projected_inode();
  inode_t *i = (pfile|pauth|plink|pxattr) ? pi : oi;

  dout(20) << "encode_cap_message pfile " << pfile
	   << " pauth " << pauth << " plink " << plink << " pxattr " << pxattr
	   << " ctime " << i->ctime << dendl;

  // file section
  i = pfile ? pi:oi;
  m->set_layout(i->layout);
  m->size = i->size;
  m->truncate_seq = i->truncate_seq;
  m->truncate_size = i->truncate_size;
  m->mtime = i->mtime;
  m->atime = i->atime;
  m->ctime = i->ctime;
  m->change_attr = i->change_attr;
  m->time_warp_seq = i->time_warp_seq;

  // send inline data only if the client's copy is out of date
  if (cap->client_inline_version < i->inline_data.version) {
    m->inline_version = cap->client_inline_version = i->inline_data.version;
    if (i->inline_data.length() > 0)
      m->inline_data = i->inline_data.get_data();
  } else {
    m->inline_version = 0;
  }

  // max_size is min of projected, actual.
  uint64_t oldms = oi->client_ranges.count(client) ? oi->client_ranges[client].range.last : 0;
  uint64_t newms = pi->client_ranges.count(client) ? pi->client_ranges[client].range.last : 0;
  m->max_size = MIN(oldms, newms);

  // auth section
  i = pauth ? pi:oi;
  m->head.mode = i->mode;
  m->head.uid = i->uid;
  m->head.gid = i->gid;

  // link section
  i = plink ? pi:oi;
  m->head.nlink = i->nlink;

  // xattr section: only if client holds XATTR_SHARED and is out of date
  i = pxattr ? pi:oi;
  map<string,bufferptr> *ix = pxattr ? get_projected_xattrs() : &xattrs;
  if ((cap->pending() & CEPH_CAP_XATTR_SHARED) &&
      i->xattr_version > cap->client_xattr_version) {
    dout(10) << " including xattrs v " << i->xattr_version << dendl;
    ::encode(*ix, m->xattrbl);
    m->head.xattr_version = i->xattr_version;
    cap->client_xattr_version = i->xattr_version;
  }
}
3469 | ||
3470 | ||
3471 | ||
// Encode the inode's base metadata.  Field order must match _decode_base().
void CInode::_encode_base(bufferlist& bl, uint64_t features)
{
  ::encode(first, bl);
  ::encode(inode, bl, features);
  ::encode(symlink, bl);
  ::encode(dirfragtree, bl);
  ::encode(xattrs, bl);
  ::encode(old_inodes, bl, features);
  ::encode(damage_flags, bl);
  encode_snap(bl);
}
// Decode the inode's base metadata.  Field order must match _encode_base().
void CInode::_decode_base(bufferlist::iterator& p)
{
  ::decode(first, p);
  ::decode(inode, p);
  ::decode(symlink, p);
  ::decode(dirfragtree, p);
  ::decode(xattrs, p);
  ::decode(old_inodes, p);
  ::decode(damage_flags, p);
  decode_snap(p);
}
3494 | ||
// Encode the full state of every lock plus the loner.  Lock order must
// match _decode_locks_full().
void CInode::_encode_locks_full(bufferlist& bl)
{
  ::encode(authlock, bl);
  ::encode(linklock, bl);
  ::encode(dirfragtreelock, bl);
  ::encode(filelock, bl);
  ::encode(xattrlock, bl);
  ::encode(snaplock, bl);
  ::encode(nestlock, bl);
  ::encode(flocklock, bl);
  ::encode(policylock, bl);

  ::encode(loner_cap, bl);
}
// Decode the full state of every lock plus the loner.  Lock order must
// match _encode_locks_full().
void CInode::_decode_locks_full(bufferlist::iterator& p)
{
  ::decode(authlock, p);
  ::decode(linklock, p);
  ::decode(dirfragtreelock, p);
  ::decode(filelock, p);
  ::decode(xattrlock, p);
  ::decode(snaplock, p);
  ::decode(nestlock, p);
  ::decode(flocklock, p);
  ::decode(policylock, p);

  ::decode(loner_cap, p);
  // re-propagate the loner to the locks' excl-client fields
  set_loner_cap(loner_cap);
  want_loner_cap = loner_cap;  // for now, we'll eval() shortly.
}
3525 | ||
// Encode per-lock replica state.  Lock order must match
// _decode_locks_state().
void CInode::_encode_locks_state_for_replica(bufferlist& bl)
{
  authlock.encode_state_for_replica(bl);
  linklock.encode_state_for_replica(bl);
  dirfragtreelock.encode_state_for_replica(bl);
  filelock.encode_state_for_replica(bl);
  nestlock.encode_state_for_replica(bl);
  xattrlock.encode_state_for_replica(bl);
  snaplock.encode_state_for_replica(bl);
  flocklock.encode_state_for_replica(bl);
  policylock.encode_state_for_replica(bl);
}
// Encode per-lock state for rejoin; the scatterlocks (dirfragtree, file,
// nest) use the rejoin-specific encoding for replica `rep`.  Lock order
// must match _decode_locks_rejoin().
void CInode::_encode_locks_state_for_rejoin(bufferlist& bl, int rep)
{
  authlock.encode_state_for_replica(bl);
  linklock.encode_state_for_replica(bl);
  dirfragtreelock.encode_state_for_rejoin(bl, rep);
  filelock.encode_state_for_rejoin(bl, rep);
  nestlock.encode_state_for_rejoin(bl, rep);
  xattrlock.encode_state_for_replica(bl);
  snaplock.encode_state_for_replica(bl);
  flocklock.encode_state_for_replica(bl);
  policylock.encode_state_for_replica(bl);
}
// Decode per-lock replica state.  Lock order must match
// _encode_locks_state_for_replica().
void CInode::_decode_locks_state(bufferlist::iterator& p, bool is_new)
{
  authlock.decode_state(p, is_new);
  linklock.decode_state(p, is_new);
  dirfragtreelock.decode_state(p, is_new);
  filelock.decode_state(p, is_new);
  nestlock.decode_state(p, is_new);
  xattrlock.decode_state(p, is_new);
  snaplock.decode_state(p, is_new);
  flocklock.decode_state(p, is_new);
  policylock.decode_state(p, is_new);
}
/*
 * Decode lock state during cache rejoin (inverse of
 * _encode_locks_state_for_rejoin(); decode order must match).
 * Contexts blocked on any lock are collected into `waiters`.
 * Scatterlocks that end up unstable and not wrlocked are pushed onto
 * `eval_locks` so the caller can re-evaluate them.
 */
void CInode::_decode_locks_rejoin(bufferlist::iterator& p, list<MDSInternalContextBase*>& waiters,
                                  list<SimpleLock*>& eval_locks)
{
  authlock.decode_state_rejoin(p, waiters);
  linklock.decode_state_rejoin(p, waiters);
  dirfragtreelock.decode_state_rejoin(p, waiters);
  filelock.decode_state_rejoin(p, waiters);
  nestlock.decode_state_rejoin(p, waiters);
  xattrlock.decode_state_rejoin(p, waiters);
  snaplock.decode_state_rejoin(p, waiters);
  flocklock.decode_state_rejoin(p, waiters);
  policylock.decode_state_rejoin(p, waiters);

  // queue unstable (and not wrlocked) scatterlocks for later eval
  if (!dirfragtreelock.is_stable() && !dirfragtreelock.is_wrlocked())
    eval_locks.push_back(&dirfragtreelock);
  if (!filelock.is_stable() && !filelock.is_wrlocked())
    eval_locks.push_back(&filelock);
  if (!nestlock.is_stable() && !nestlock.is_wrlocked())
    eval_locks.push_back(&nestlock);
}
3582 | ||
3583 | ||
3584 | // IMPORT/EXPORT | |
3585 | ||
/*
 * Serialize this inode for export (migration) to another MDS rank.
 * Paired with decode_import(); encode and decode order must match.
 * Takes PIN_TEMPEXPORTING, released later by finish_export().
 */
void CInode::encode_export(bufferlist& bl)
{
  ENCODE_START(5, 4, bl);  // version 5, compat 4
  _encode_base(bl, mdcache->mds->mdsmap->get_up_features());

  ::encode(state, bl);

  ::encode(pop, bl);

  ::encode(replica_map, bl);

  // include scatterlock info for any bounding CDirs
  bufferlist bounding;
  if (inode.is_dir())
    for (compact_map<frag_t,CDir*>::iterator p = dirfrags.begin();
         p != dirfrags.end();
         ++p) {
      CDir *dir = p->second;
      if (dir->state_test(CDir::STATE_EXPORTBOUND)) {
        // frag id followed by its four stat structures
        ::encode(p->first, bounding);
        ::encode(dir->fnode.fragstat, bounding);
        ::encode(dir->fnode.accounted_fragstat, bounding);
        ::encode(dir->fnode.rstat, bounding);
        ::encode(dir->fnode.accounted_rstat, bounding);
        dout(10) << " encoded fragstat/rstat info for " << *dir << dendl;
      }
    }
  ::encode(bounding, bl);

  _encode_locks_full(bl);

  _encode_file_locks(bl);

  ENCODE_FINISH(bl);

  // hold a pin until the export completes (see finish_export())
  get(PIN_TEMPEXPORTING);
}
3623 | ||
/*
 * Finish an export of this inode: keep only the state bits in
 * MASK_STATE_EXPORT_KEPT, zero the popularity counters, clear loner
 * state, and drop the PIN_TEMPEXPORTING taken in encode_export().
 */
void CInode::finish_export(utime_t now)
{
  state &= MASK_STATE_EXPORT_KEPT;

  pop.zero(now);

  // just in case!
  //dirlock.clear_updated();

  loner_cap = -1;

  put(PIN_TEMPEXPORTING);
}
3637 | ||
/*
 * Inverse of encode_export(): rebuild this inode from a migration
 * message.  Decode order must match encode_export() exactly.
 * Marks the inode AUTH, re-establishes dirty/replica pins, and absorbs
 * fragstat/rstat info for bounding dirfrags where the sender had the
 * authoritative copy.  `ls` is the log segment dirty state lands in.
 */
void CInode::decode_import(bufferlist::iterator& p,
                           LogSegment *ls)
{
  DECODE_START(5, p);

  _decode_base(p);

  unsigned s;
  ::decode(s, p);
  // we become auth; keep only the exported state bits
  state_set(STATE_AUTH | (s & MASK_STATE_EXPORTED));

  if (is_dirty()) {
    get(PIN_DIRTY);
    _mark_dirty(ls);
  }
  if (is_dirty_parent()) {
    get(PIN_DIRTYPARENT);
    _mark_dirty_parent(ls);
  }

  ::decode(pop, ceph_clock_now(), p);

  ::decode(replica_map, p);
  if (!replica_map.empty())
    get(PIN_REPLICATED);
  replica_nonce = 0;

  // decode fragstat info on bounding cdirs
  bufferlist bounding;
  ::decode(bounding, p);
  bufferlist::iterator q = bounding.begin();
  while (!q.end()) {
    frag_t fg;
    ::decode(fg, q);
    CDir *dir = get_dirfrag(fg);
    assert(dir);  // we should have all bounds open

    // Only take the remote's fragstat/rstat if we are non-auth for
    // this dirfrag AND the lock is NOT in a scattered (MIX) state.
    // We know lock is stable, and MIX is the only state in which
    // the inode auth (who sent us this data) may not have the best
    // info.

    // HMM: Are there cases where dir->is_auth() is an insufficient
    // check because the dirfrag is under migration?  That implies
    // it is frozen (and in a SYNC or LOCK state).  FIXME.

    if (dir->is_auth() ||
        filelock.get_state() == LOCK_MIX) {
      dout(10) << " skipped fragstat info for " << *dir << dendl;
      // must still consume both encoded frag_info_t values
      frag_info_t f;
      ::decode(f, q);
      ::decode(f, q);
    } else {
      ::decode(dir->fnode.fragstat, q);
      ::decode(dir->fnode.accounted_fragstat, q);
      dout(10) << " took fragstat info for " << *dir << dendl;
    }
    if (dir->is_auth() ||
        nestlock.get_state() == LOCK_MIX) {
      dout(10) << " skipped rstat info for " << *dir << dendl;
      // must still consume both encoded nest_info_t values
      nest_info_t n;
      ::decode(n, q);
      ::decode(n, q);
    } else {
      ::decode(dir->fnode.rstat, q);
      ::decode(dir->fnode.accounted_rstat, q);
      dout(10) << " took rstat info for " << *dir << dendl;
    }
  }

  _decode_locks_full(p);

  _decode_file_locks(p);

  DECODE_FINISH(p);
}
3715 | ||
3716 | ||
3717 | void InodeStoreBase::dump(Formatter *f) const | |
3718 | { | |
3719 | inode.dump(f); | |
3720 | f->dump_string("symlink", symlink); | |
3721 | f->open_array_section("old_inodes"); | |
3722 | for (compact_map<snapid_t, old_inode_t>::const_iterator i = old_inodes.begin(); | |
3723 | i != old_inodes.end(); ++i) { | |
3724 | f->open_object_section("old_inode"); | |
3725 | { | |
3726 | // The key is the last snapid, the first is in the old_inode_t | |
3727 | f->dump_int("last", i->first); | |
3728 | i->second.dump(f); | |
3729 | } | |
3730 | f->close_section(); // old_inode | |
3731 | } | |
3732 | f->close_section(); // old_inodes | |
3733 | ||
3734 | f->open_object_section("dirfragtree"); | |
3735 | dirfragtree.dump(f); | |
3736 | f->close_section(); // dirfragtree | |
3737 | } | |
3738 | ||
3739 | ||
3740 | void InodeStore::generate_test_instances(list<InodeStore*> &ls) | |
3741 | { | |
3742 | InodeStore *populated = new InodeStore; | |
3743 | populated->inode.ino = 0xdeadbeef; | |
3744 | populated->symlink = "rhubarb"; | |
3745 | ls.push_back(populated); | |
3746 | } | |
3747 | ||
3748 | void CInode::validate_disk_state(CInode::validated_data *results, | |
3749 | MDSInternalContext *fin) | |
3750 | { | |
3751 | class ValidationContinuation : public MDSContinuation { | |
3752 | public: | |
3753 | MDSInternalContext *fin; | |
3754 | CInode *in; | |
3755 | CInode::validated_data *results; | |
3756 | bufferlist bl; | |
3757 | CInode *shadow_in; | |
3758 | ||
3759 | enum { | |
3760 | START = 0, | |
3761 | BACKTRACE, | |
3762 | INODE, | |
3763 | DIRFRAGS | |
3764 | }; | |
3765 | ||
3766 | ValidationContinuation(CInode *i, | |
3767 | CInode::validated_data *data_r, | |
3768 | MDSInternalContext *fin_) : | |
3769 | MDSContinuation(i->mdcache->mds->server), | |
3770 | fin(fin_), | |
3771 | in(i), | |
3772 | results(data_r), | |
3773 | shadow_in(NULL) { | |
3774 | set_callback(START, static_cast<Continuation::stagePtr>(&ValidationContinuation::_start)); | |
3775 | set_callback(BACKTRACE, static_cast<Continuation::stagePtr>(&ValidationContinuation::_backtrace)); | |
3776 | set_callback(INODE, static_cast<Continuation::stagePtr>(&ValidationContinuation::_inode_disk)); | |
3777 | set_callback(DIRFRAGS, static_cast<Continuation::stagePtr>(&ValidationContinuation::_dirfrags)); | |
3778 | } | |
3779 | ||
3780 | ~ValidationContinuation() override { | |
3781 | delete shadow_in; | |
3782 | } | |
3783 | ||
3784 | /** | |
3785 | * Fetch backtrace and set tag if tag is non-empty | |
3786 | */ | |
3787 | void fetch_backtrace_and_tag(CInode *in, std::string tag, | |
3788 | Context *fin, int *bt_r, bufferlist *bt) | |
3789 | { | |
3790 | const int64_t pool = in->get_backtrace_pool(); | |
3791 | object_t oid = CInode::get_object_name(in->ino(), frag_t(), ""); | |
3792 | ||
3793 | ObjectOperation fetch; | |
3794 | fetch.getxattr("parent", bt, bt_r); | |
3795 | in->mdcache->mds->objecter->read(oid, object_locator_t(pool), fetch, CEPH_NOSNAP, | |
3796 | NULL, 0, fin); | |
3797 | if (!tag.empty()) { | |
3798 | ObjectOperation scrub_tag; | |
3799 | bufferlist tag_bl; | |
3800 | ::encode(tag, tag_bl); | |
3801 | scrub_tag.setxattr("scrub_tag", tag_bl); | |
3802 | SnapContext snapc; | |
3803 | in->mdcache->mds->objecter->mutate(oid, object_locator_t(pool), scrub_tag, snapc, | |
3804 | ceph::real_clock::now(), | |
3805 | 0, NULL); | |
3806 | } | |
3807 | } | |
3808 | ||
3809 | bool _start(int rval) { | |
3810 | if (in->is_dirty()) { | |
3811 | MDCache *mdcache = in->mdcache; | |
3812 | inode_t& inode = in->inode; | |
3813 | dout(20) << "validating a dirty CInode; results will be inconclusive" | |
3814 | << dendl; | |
3815 | } | |
3816 | if (in->is_symlink()) { | |
3817 | // there's nothing to do for symlinks! | |
3818 | return true; | |
3819 | } | |
3820 | ||
3821 | C_OnFinisher *conf = new C_OnFinisher(get_io_callback(BACKTRACE), | |
3822 | in->mdcache->mds->finisher); | |
3823 | ||
3824 | // Whether we have a tag to apply depends on ScrubHeader (if one is | |
3825 | // present) | |
3826 | if (in->scrub_infop) { | |
3827 | // I'm a non-orphan, so look up my ScrubHeader via my linkage | |
3828 | const std::string &tag = in->scrub_infop->header->get_tag(); | |
3829 | // Rather than using the usual CInode::fetch_backtrace, | |
3830 | // use a special variant that optionally writes a tag in the same | |
3831 | // operation. | |
3832 | fetch_backtrace_and_tag(in, tag, conf, | |
3833 | &results->backtrace.ondisk_read_retval, &bl); | |
3834 | } else { | |
3835 | // When we're invoked outside of ScrubStack we might be called | |
3836 | // on an orphaned inode like / | |
3837 | fetch_backtrace_and_tag(in, {}, conf, | |
3838 | &results->backtrace.ondisk_read_retval, &bl); | |
3839 | } | |
3840 | return false; | |
3841 | } | |
3842 | ||
3843 | bool _backtrace(int rval) { | |
3844 | // set up basic result reporting and make sure we got the data | |
3845 | results->performed_validation = true; // at least, some of it! | |
3846 | results->backtrace.checked = true; | |
3847 | ||
3848 | const int64_t pool = in->get_backtrace_pool(); | |
3849 | inode_backtrace_t& memory_backtrace = results->backtrace.memory_value; | |
3850 | in->build_backtrace(pool, memory_backtrace); | |
3851 | bool equivalent, divergent; | |
3852 | int memory_newer; | |
3853 | ||
3854 | MDCache *mdcache = in->mdcache; // For the benefit of dout | |
3855 | const inode_t& inode = in->inode; // For the benefit of dout | |
3856 | ||
3857 | // Ignore rval because it's the result of a FAILOK operation | |
3858 | // from fetch_backtrace_and_tag: the real result is in | |
3859 | // backtrace.ondisk_read_retval | |
3860 | dout(20) << "ondisk_read_retval: " << results->backtrace.ondisk_read_retval << dendl; | |
3861 | if (results->backtrace.ondisk_read_retval != 0) { | |
3862 | results->backtrace.error_str << "failed to read off disk; see retval"; | |
3863 | goto next; | |
3864 | } | |
3865 | ||
3866 | // extract the backtrace, and compare it to a newly-constructed one | |
3867 | try { | |
3868 | bufferlist::iterator p = bl.begin(); | |
3869 | ::decode(results->backtrace.ondisk_value, p); | |
3870 | dout(10) << "decoded " << bl.length() << " bytes of backtrace successfully" << dendl; | |
3871 | } catch (buffer::error&) { | |
3872 | if (results->backtrace.ondisk_read_retval == 0 && rval != 0) { | |
3873 | // Cases where something has clearly gone wrong with the overall | |
3874 | // fetch op, though we didn't get a nonzero rc from the getxattr | |
3875 | // operation. e.g. object missing. | |
3876 | results->backtrace.ondisk_read_retval = rval; | |
3877 | } | |
3878 | results->backtrace.error_str << "failed to decode on-disk backtrace (" | |
3879 | << bl.length() << " bytes)!"; | |
3880 | goto next; | |
3881 | } | |
3882 | ||
3883 | memory_newer = memory_backtrace.compare(results->backtrace.ondisk_value, | |
3884 | &equivalent, &divergent); | |
3885 | ||
3886 | if (divergent || memory_newer < 0) { | |
3887 | // we're divergent, or on-disk version is newer | |
3888 | results->backtrace.error_str << "On-disk backtrace is divergent or newer"; | |
3889 | } else { | |
3890 | results->backtrace.passed = true; | |
3891 | } | |
3892 | next: | |
3893 | ||
3894 | if (!results->backtrace.passed && in->scrub_infop->header->get_repair()) { | |
3895 | std::string path; | |
3896 | in->make_path_string(path); | |
3897 | in->mdcache->mds->clog->warn() << "bad backtrace on inode " << *in | |
3898 | << ", rewriting it at " << path; | |
3899 | in->_mark_dirty_parent(in->mdcache->mds->mdlog->get_current_segment(), | |
3900 | false); | |
3901 | } | |
3902 | ||
3903 | // If the inode's number was free in the InoTable, fix that | |
3904 | // (#15619) | |
3905 | { | |
3906 | InoTable *inotable = mdcache->mds->inotable; | |
3907 | ||
3908 | dout(10) << "scrub: inotable ino = 0x" << std::hex << inode.ino << dendl; | |
3909 | dout(10) << "scrub: inotable free says " | |
3910 | << inotable->is_marked_free(inode.ino) << dendl; | |
3911 | ||
3912 | if (inotable->is_marked_free(inode.ino)) { | |
3913 | LogChannelRef clog = in->mdcache->mds->clog; | |
3914 | clog->error() << "scrub: inode wrongly marked free: 0x" << std::hex | |
3915 | << inode.ino; | |
3916 | ||
3917 | if (in->scrub_infop->header->get_repair()) { | |
3918 | bool repaired = inotable->repair(inode.ino); | |
3919 | if (repaired) { | |
3920 | clog->error() << "inode table repaired for inode: 0x" << std::hex | |
3921 | << inode.ino; | |
3922 | ||
3923 | inotable->save(); | |
3924 | } else { | |
3925 | clog->error() << "Cannot repair inotable while other operations" | |
3926 | " are in progress"; | |
3927 | } | |
3928 | } | |
3929 | } | |
3930 | } | |
3931 | ||
3932 | // quit if we're a file, or kick off directory checks otherwise | |
3933 | // TODO: validate on-disk inode for non-base directories | |
3934 | if (!in->is_dir()) { | |
3935 | return true; | |
3936 | } | |
3937 | ||
3938 | return validate_directory_data(); | |
3939 | } | |
3940 | ||
3941 | bool validate_directory_data() { | |
3942 | assert(in->is_dir()); | |
3943 | ||
3944 | if (in->is_base()) { | |
3945 | shadow_in = new CInode(in->mdcache); | |
3946 | in->mdcache->create_unlinked_system_inode(shadow_in, | |
3947 | in->inode.ino, | |
3948 | in->inode.mode); | |
3949 | shadow_in->fetch(get_internal_callback(INODE)); | |
3950 | return false; | |
3951 | } else { | |
3952 | results->inode.passed = true; | |
3953 | return check_dirfrag_rstats(); | |
3954 | } | |
3955 | } | |
3956 | ||
3957 | bool _inode_disk(int rval) { | |
3958 | results->inode.checked = true; | |
3959 | results->inode.ondisk_read_retval = rval; | |
3960 | results->inode.ondisk_value = shadow_in->inode; | |
3961 | results->inode.memory_value = in->inode; | |
3962 | ||
3963 | inode_t& si = shadow_in->inode; | |
3964 | inode_t& i = in->inode; | |
3965 | if (si.version > i.version) { | |
3966 | // uh, what? | |
3967 | results->inode.error_str << "On-disk inode is newer than in-memory one!"; | |
3968 | goto next; | |
3969 | } else { | |
3970 | bool divergent = false; | |
3971 | int r = i.compare(si, &divergent); | |
3972 | results->inode.passed = !divergent && r >= 0; | |
3973 | if (!results->inode.passed) { | |
3974 | results->inode.error_str << | |
3975 | "On-disk inode is divergent or newer than in-memory one!"; | |
3976 | goto next; | |
3977 | } | |
3978 | } | |
3979 | next: | |
3980 | return check_dirfrag_rstats(); | |
3981 | } | |
3982 | ||
3983 | bool check_dirfrag_rstats() { | |
3984 | MDSGatherBuilder gather(g_ceph_context); | |
3985 | std::list<frag_t> frags; | |
3986 | in->dirfragtree.get_leaves(frags); | |
3987 | for (list<frag_t>::iterator p = frags.begin(); | |
3988 | p != frags.end(); | |
3989 | ++p) { | |
3990 | CDir *dir = in->get_or_open_dirfrag(in->mdcache, *p); | |
3991 | dir->scrub_info(); | |
3992 | if (!dir->scrub_infop->header) | |
3993 | dir->scrub_infop->header = in->scrub_infop->header; | |
3994 | if (dir->is_complete()) { | |
3995 | dir->scrub_local(); | |
3996 | } else { | |
3997 | dir->scrub_infop->need_scrub_local = true; | |
3998 | dir->fetch(gather.new_sub(), false); | |
3999 | } | |
4000 | } | |
4001 | if (gather.has_subs()) { | |
4002 | gather.set_finisher(get_internal_callback(DIRFRAGS)); | |
4003 | gather.activate(); | |
4004 | return false; | |
4005 | } else { | |
4006 | return immediate(DIRFRAGS, 0); | |
4007 | } | |
4008 | } | |
4009 | ||
4010 | bool _dirfrags(int rval) { | |
4011 | int frags_errors = 0; | |
4012 | // basic reporting setup | |
4013 | results->raw_stats.checked = true; | |
4014 | results->raw_stats.ondisk_read_retval = rval; | |
4015 | ||
4016 | results->raw_stats.memory_value.dirstat = in->inode.dirstat; | |
4017 | results->raw_stats.memory_value.rstat = in->inode.rstat; | |
4018 | frag_info_t& dir_info = results->raw_stats.ondisk_value.dirstat; | |
4019 | nest_info_t& nest_info = results->raw_stats.ondisk_value.rstat; | |
4020 | ||
4021 | if (rval != 0) { | |
4022 | results->raw_stats.error_str << "Failed to read dirfrags off disk"; | |
4023 | goto next; | |
4024 | } | |
4025 | ||
4026 | // check each dirfrag... | |
4027 | for (compact_map<frag_t,CDir*>::iterator p = in->dirfrags.begin(); | |
4028 | p != in->dirfrags.end(); | |
4029 | ++p) { | |
4030 | CDir *dir = p->second; | |
4031 | assert(dir->get_version() > 0); | |
4032 | nest_info.add(dir->fnode.accounted_rstat); | |
4033 | dir_info.add(dir->fnode.accounted_fragstat); | |
4034 | if (dir->scrub_infop && | |
4035 | dir->scrub_infop->pending_scrub_error) { | |
4036 | dir->scrub_infop->pending_scrub_error = false; | |
4037 | if (dir->scrub_infop->header->get_repair()) { | |
4038 | results->raw_stats.error_str | |
4039 | << "dirfrag(" << p->first << ") has bad stats (will be fixed); "; | |
4040 | } else { | |
4041 | results->raw_stats.error_str | |
4042 | << "dirfrag(" << p->first << ") has bad stats; "; | |
4043 | } | |
4044 | frags_errors++; | |
4045 | } | |
4046 | } | |
4047 | nest_info.rsubdirs++; // it gets one to account for self | |
4048 | // ...and that their sum matches our inode settings | |
4049 | if (!dir_info.same_sums(in->inode.dirstat) || | |
4050 | !nest_info.same_sums(in->inode.rstat)) { | |
4051 | if (in->scrub_infop && | |
4052 | in->scrub_infop->header->get_repair()) { | |
4053 | results->raw_stats.error_str | |
4054 | << "freshly-calculated rstats don't match existing ones (will be fixed)"; | |
4055 | in->mdcache->repair_inode_stats(in); | |
4056 | } else { | |
4057 | results->raw_stats.error_str | |
4058 | << "freshly-calculated rstats don't match existing ones"; | |
4059 | } | |
4060 | goto next; | |
4061 | } | |
4062 | if (frags_errors > 0) | |
4063 | goto next; | |
4064 | ||
4065 | results->raw_stats.passed = true; | |
4066 | next: | |
4067 | return true; | |
4068 | } | |
4069 | ||
4070 | void _done() override { | |
4071 | if ((!results->raw_stats.checked || results->raw_stats.passed) && | |
4072 | (!results->backtrace.checked || results->backtrace.passed) && | |
4073 | (!results->inode.checked || results->inode.passed)) | |
4074 | results->passed_validation = true; | |
4075 | if (fin) { | |
4076 | fin->complete(get_rval()); | |
4077 | } | |
4078 | } | |
4079 | }; | |
4080 | ||
4081 | ||
4082 | dout(10) << "scrub starting validate_disk_state on " << *this << dendl; | |
4083 | ValidationContinuation *vc = new ValidationContinuation(this, | |
4084 | results, | |
4085 | fin); | |
4086 | vc->begin(); | |
4087 | } | |
4088 | ||
/*
 * Emit the result of validate_disk_state() via the given Formatter.
 * Key names are part of the emitted format: "memoryvalue" (no
 * underscore) and "memory_value.dirrstat" look like typos but are kept
 * as-is -- NOTE(review): confirm no external consumer depends on these
 * exact keys before renaming them.
 */
void CInode::validated_data::dump(Formatter *f) const
{
  f->open_object_section("results");
  {
    f->dump_bool("performed_validation", performed_validation);
    f->dump_bool("passed_validation", passed_validation);
    f->open_object_section("backtrace");
    {
      f->dump_bool("checked", backtrace.checked);
      f->dump_bool("passed", backtrace.passed);
      f->dump_int("read_ret_val", backtrace.ondisk_read_retval);
      f->dump_stream("ondisk_value") << backtrace.ondisk_value;
      f->dump_stream("memoryvalue") << backtrace.memory_value;
      f->dump_string("error_str", backtrace.error_str.str());
    }
    f->close_section(); // backtrace
    f->open_object_section("raw_stats");
    {
      f->dump_bool("checked", raw_stats.checked);
      f->dump_bool("passed", raw_stats.passed);
      f->dump_int("read_ret_val", raw_stats.ondisk_read_retval);
      f->dump_stream("ondisk_value.dirstat") << raw_stats.ondisk_value.dirstat;
      f->dump_stream("ondisk_value.rstat") << raw_stats.ondisk_value.rstat;
      f->dump_stream("memory_value.dirrstat") << raw_stats.memory_value.dirstat;
      f->dump_stream("memory_value.rstat") << raw_stats.memory_value.rstat;
      f->dump_string("error_str", raw_stats.error_str.str());
    }
    f->close_section(); // raw_stats
    // dump failure return code: later checks take precedence
    // (backtrace, then inode, then raw_stats)
    int rc = 0;
    if (backtrace.checked && backtrace.ondisk_read_retval)
      rc = backtrace.ondisk_read_retval;
    if (inode.checked && inode.ondisk_read_retval)
      rc = inode.ondisk_read_retval;
    if (raw_stats.checked && raw_stats.ondisk_read_retval)
      rc = raw_stats.ondisk_read_retval;
    f->dump_int("return_code", rc);
  }
  f->close_section(); // results
}
4129 | ||
/*
 * Dump this inode's full in-memory state via the given Formatter:
 * stored fields (InodeStoreBase), cache-object state (MDSCacheObject),
 * every lock, the state flags, issued client capabilities, loner
 * information, and caps wanted by other MDS ranks.
 */
void CInode::dump(Formatter *f) const
{
  InodeStoreBase::dump(f);

  MDSCacheObject::dump(f);

  f->open_object_section("versionlock");
  versionlock.dump(f);
  f->close_section();

  f->open_object_section("authlock");
  authlock.dump(f);
  f->close_section();

  f->open_object_section("linklock");
  linklock.dump(f);
  f->close_section();

  f->open_object_section("dirfragtreelock");
  dirfragtreelock.dump(f);
  f->close_section();

  f->open_object_section("filelock");
  filelock.dump(f);
  f->close_section();

  f->open_object_section("xattrlock");
  xattrlock.dump(f);
  f->close_section();

  f->open_object_section("snaplock");
  snaplock.dump(f);
  f->close_section();

  f->open_object_section("nestlock");
  nestlock.dump(f);
  f->close_section();

  f->open_object_section("flocklock");
  flocklock.dump(f);
  f->close_section();

  f->open_object_section("policylock");
  policylock.dump(f);
  f->close_section();

  // one "state" entry per set flag; entries may therefore repeat the key
  f->open_array_section("states");
  MDSCacheObject::dump_states(f);
  if (state_test(STATE_EXPORTING))
    f->dump_string("state", "exporting");
  if (state_test(STATE_OPENINGDIR))
    f->dump_string("state", "openingdir");
  if (state_test(STATE_FREEZING))
    f->dump_string("state", "freezing");
  if (state_test(STATE_FROZEN))
    f->dump_string("state", "frozen");
  if (state_test(STATE_AMBIGUOUSAUTH))
    f->dump_string("state", "ambiguousauth");
  if (state_test(STATE_EXPORTINGCAPS))
    f->dump_string("state", "exportingcaps");
  if (state_test(STATE_NEEDSRECOVER))
    f->dump_string("state", "needsrecover");
  if (state_test(STATE_PURGING))
    f->dump_string("state", "purging");
  if (state_test(STATE_DIRTYPARENT))
    f->dump_string("state", "dirtyparent");
  if (state_test(STATE_DIRTYRSTAT))
    f->dump_string("state", "dirtyrstat");
  if (state_test(STATE_STRAYPINNED))
    f->dump_string("state", "straypinned");
  if (state_test(STATE_FROZENAUTHPIN))
    f->dump_string("state", "frozenauthpin");
  if (state_test(STATE_DIRTYPOOL))
    f->dump_string("state", "dirtypool");
  if (state_test(STATE_ORPHAN))
    f->dump_string("state", "orphan");
  if (state_test(STATE_MISSINGOBJS))
    f->dump_string("state", "missingobjs");
  f->close_section();

  f->open_array_section("client_caps");
  for (map<client_t,Capability*>::const_iterator it = client_caps.begin();
       it != client_caps.end(); ++it) {
    f->open_object_section("client_cap");
    f->dump_int("client_id", it->first.v);
    f->dump_string("pending", ccap_string(it->second->pending()));
    f->dump_string("issued", ccap_string(it->second->issued()));
    f->dump_string("wanted", ccap_string(it->second->wanted()));
    f->dump_string("last_sent", ccap_string(it->second->get_last_sent()));
    f->close_section();
  }
  f->close_section();

  f->dump_int("loner", loner_cap.v);
  f->dump_int("want_loner", want_loner_cap.v);

  f->open_array_section("mds_caps_wanted");
  for (compact_map<int,int>::const_iterator p = mds_caps_wanted.begin();
       p != mds_caps_wanted.end(); ++p) {
    f->open_object_section("mds_cap_wanted");
    f->dump_int("rank", p->first);
    f->dump_string("cap", ccap_string(p->second));
    f->close_section();
  }
  f->close_section();
}
4236 | ||
4237 | /****** Scrub Stuff *****/ | |
4238 | void CInode::scrub_info_create() const | |
4239 | { | |
4240 | dout(25) << __func__ << dendl; | |
4241 | assert(!scrub_infop); | |
4242 | ||
4243 | // break out of const-land to set up implicit initial state | |
4244 | CInode *me = const_cast<CInode*>(this); | |
4245 | inode_t *in = me->get_projected_inode(); | |
4246 | ||
4247 | scrub_info_t *si = new scrub_info_t(); | |
4248 | si->scrub_start_stamp = si->last_scrub_stamp = in->last_scrub_stamp; | |
4249 | si->scrub_start_version = si->last_scrub_version = in->last_scrub_version; | |
4250 | ||
4251 | me->scrub_infop = si; | |
4252 | } | |
4253 | ||
4254 | void CInode::scrub_maybe_delete_info() | |
4255 | { | |
4256 | if (scrub_infop && | |
4257 | !scrub_infop->scrub_in_progress && | |
4258 | !scrub_infop->last_scrub_dirty) { | |
4259 | delete scrub_infop; | |
4260 | scrub_infop = NULL; | |
4261 | } | |
4262 | } | |
4263 | ||
4264 | void CInode::scrub_initialize(CDentry *scrub_parent, | |
4265 | const ScrubHeaderRefConst& header, | |
4266 | MDSInternalContextBase *f) | |
4267 | { | |
4268 | dout(20) << __func__ << " with scrub_version " << get_version() << dendl; | |
4269 | assert(!scrub_is_in_progress()); | |
4270 | scrub_info(); | |
4271 | if (!scrub_infop) | |
4272 | scrub_infop = new scrub_info_t(); | |
4273 | ||
4274 | if (get_projected_inode()->is_dir()) { | |
4275 | // fill in dirfrag_stamps with initial state | |
4276 | std::list<frag_t> frags; | |
4277 | dirfragtree.get_leaves(frags); | |
4278 | for (std::list<frag_t>::iterator i = frags.begin(); | |
4279 | i != frags.end(); | |
4280 | ++i) { | |
4281 | if (header->get_force()) | |
4282 | scrub_infop->dirfrag_stamps[*i].reset(); | |
4283 | else | |
4284 | scrub_infop->dirfrag_stamps[*i]; | |
4285 | } | |
4286 | } | |
4287 | ||
4288 | if (scrub_parent) | |
4289 | scrub_parent->get(CDentry::PIN_SCRUBPARENT); | |
4290 | scrub_infop->scrub_parent = scrub_parent; | |
4291 | scrub_infop->on_finish = f; | |
4292 | scrub_infop->scrub_in_progress = true; | |
4293 | scrub_infop->children_scrubbed = false; | |
4294 | scrub_infop->header = header; | |
4295 | ||
4296 | scrub_infop->scrub_start_version = get_version(); | |
4297 | scrub_infop->scrub_start_stamp = ceph_clock_now(); | |
4298 | // right now we don't handle remote inodes | |
4299 | } | |
4300 | ||
4301 | int CInode::scrub_dirfrag_next(frag_t* out_dirfrag) | |
4302 | { | |
4303 | dout(20) << __func__ << dendl; | |
4304 | assert(scrub_is_in_progress()); | |
4305 | ||
4306 | if (!is_dir()) { | |
4307 | return -ENOTDIR; | |
4308 | } | |
4309 | ||
4310 | std::map<frag_t, scrub_stamp_info_t>::iterator i = | |
4311 | scrub_infop->dirfrag_stamps.begin(); | |
4312 | ||
4313 | while (i != scrub_infop->dirfrag_stamps.end()) { | |
4314 | if (i->second.scrub_start_version < scrub_infop->scrub_start_version) { | |
4315 | i->second.scrub_start_version = get_projected_version(); | |
4316 | i->second.scrub_start_stamp = ceph_clock_now(); | |
4317 | *out_dirfrag = i->first; | |
4318 | dout(20) << " return frag " << *out_dirfrag << dendl; | |
4319 | return 0; | |
4320 | } | |
4321 | ++i; | |
4322 | } | |
4323 | ||
4324 | dout(20) << " no frags left, ENOENT " << dendl; | |
4325 | return ENOENT; | |
4326 | } | |
4327 | ||
4328 | void CInode::scrub_dirfrags_scrubbing(list<frag_t>* out_dirfrags) | |
4329 | { | |
4330 | assert(out_dirfrags != NULL); | |
4331 | assert(scrub_infop != NULL); | |
4332 | ||
4333 | out_dirfrags->clear(); | |
4334 | std::map<frag_t, scrub_stamp_info_t>::iterator i = | |
4335 | scrub_infop->dirfrag_stamps.begin(); | |
4336 | ||
4337 | while (i != scrub_infop->dirfrag_stamps.end()) { | |
4338 | if (i->second.scrub_start_version >= scrub_infop->scrub_start_version) { | |
4339 | if (i->second.last_scrub_version < scrub_infop->scrub_start_version) | |
4340 | out_dirfrags->push_back(i->first); | |
4341 | } else { | |
4342 | return; | |
4343 | } | |
4344 | ||
4345 | ++i; | |
4346 | } | |
4347 | } | |
4348 | ||
4349 | void CInode::scrub_dirfrag_finished(frag_t dirfrag) | |
4350 | { | |
4351 | dout(20) << __func__ << " on frag " << dirfrag << dendl; | |
4352 | assert(scrub_is_in_progress()); | |
4353 | ||
4354 | std::map<frag_t, scrub_stamp_info_t>::iterator i = | |
4355 | scrub_infop->dirfrag_stamps.find(dirfrag); | |
4356 | assert(i != scrub_infop->dirfrag_stamps.end()); | |
4357 | ||
4358 | scrub_stamp_info_t &si = i->second; | |
4359 | si.last_scrub_stamp = si.scrub_start_stamp; | |
4360 | si.last_scrub_version = si.scrub_start_version; | |
4361 | } | |
4362 | ||
4363 | void CInode::scrub_finished(MDSInternalContextBase **c) { | |
4364 | dout(20) << __func__ << dendl; | |
4365 | assert(scrub_is_in_progress()); | |
4366 | for (std::map<frag_t, scrub_stamp_info_t>::iterator i = | |
4367 | scrub_infop->dirfrag_stamps.begin(); | |
4368 | i != scrub_infop->dirfrag_stamps.end(); | |
4369 | ++i) { | |
4370 | if(i->second.last_scrub_version != i->second.scrub_start_version) { | |
4371 | derr << i->second.last_scrub_version << " != " | |
4372 | << i->second.scrub_start_version << dendl; | |
4373 | } | |
4374 | assert(i->second.last_scrub_version == i->second.scrub_start_version); | |
4375 | } | |
4376 | ||
4377 | scrub_infop->last_scrub_version = scrub_infop->scrub_start_version; | |
4378 | scrub_infop->last_scrub_stamp = scrub_infop->scrub_start_stamp; | |
4379 | scrub_infop->last_scrub_dirty = true; | |
4380 | scrub_infop->scrub_in_progress = false; | |
4381 | ||
4382 | if (scrub_infop->scrub_parent) { | |
4383 | CDentry *dn = scrub_infop->scrub_parent; | |
4384 | scrub_infop->scrub_parent = NULL; | |
4385 | dn->dir->scrub_dentry_finished(dn); | |
4386 | dn->put(CDentry::PIN_SCRUBPARENT); | |
4387 | } | |
4388 | ||
4389 | *c = scrub_infop->on_finish; | |
4390 | scrub_infop->on_finish = NULL; | |
4391 | ||
4392 | if (scrub_infop->header->get_origin() == this) { | |
4393 | // We are at the point that a tagging scrub was initiated | |
4394 | LogChannelRef clog = mdcache->mds->clog; | |
4395 | clog->info() << "scrub complete with tag '" << scrub_infop->header->get_tag() << "'"; | |
4396 | } | |
4397 | } | |
4398 | ||
4399 | int64_t CInode::get_backtrace_pool() const | |
4400 | { | |
4401 | if (is_dir()) { | |
4402 | return mdcache->mds->mdsmap->get_metadata_pool(); | |
4403 | } else { | |
4404 | // Files are required to have an explicit layout that specifies | |
4405 | // a pool | |
4406 | assert(inode.layout.pool_id != -1); | |
4407 | return inode.layout.pool_id; | |
4408 | } | |
4409 | } | |
4410 | ||
31f18b77 FG |
4411 | void CInode::maybe_export_pin(bool update) |
4412 | { | |
4413 | if (!g_conf->mds_bal_export_pin) | |
4414 | return; | |
4415 | if (!is_dir() || !is_normal()) | |
4416 | return; | |
7c673cae | 4417 | |
31f18b77 FG |
4418 | mds_rank_t export_pin = get_export_pin(false); |
4419 | if (export_pin == MDS_RANK_NONE && !update) | |
4420 | return; | |
7c673cae | 4421 | |
31f18b77 FG |
4422 | if (state_test(CInode::STATE_QUEUEDEXPORTPIN)) |
4423 | return; | |
4424 | ||
4425 | bool queue = false; | |
4426 | for (auto p = dirfrags.begin(); p != dirfrags.end(); p++) { | |
4427 | CDir *dir = p->second; | |
4428 | if (!dir->is_auth()) | |
4429 | continue; | |
4430 | if (export_pin != MDS_RANK_NONE) { | |
4431 | if (dir->is_subtree_root()) { | |
4432 | // set auxsubtree bit or export it | |
4433 | if (!dir->state_test(CDir::STATE_AUXSUBTREE) || | |
4434 | export_pin != dir->get_dir_auth().first) | |
4435 | queue = true; | |
4436 | } else { | |
4437 | // create aux subtree or export it | |
4438 | queue = true; | |
7c673cae | 4439 | } |
31f18b77 FG |
4440 | } else { |
4441 | // clear aux subtrees ? | |
4442 | queue = dir->state_test(CDir::STATE_AUXSUBTREE); | |
4443 | } | |
4444 | if (queue) { | |
4445 | state_set(CInode::STATE_QUEUEDEXPORTPIN); | |
7c673cae | 4446 | mdcache->export_pin_queue.insert(this); |
31f18b77 | 4447 | break; |
7c673cae FG |
4448 | } |
4449 | } | |
4450 | } | |
4451 | ||
4452 | void CInode::set_export_pin(mds_rank_t rank) | |
4453 | { | |
4454 | assert(is_dir()); | |
4455 | assert(is_projected()); | |
4456 | get_projected_inode()->export_pin = rank; | |
31f18b77 | 4457 | maybe_export_pin(true); |
7c673cae FG |
4458 | } |
4459 | ||
4460 | mds_rank_t CInode::get_export_pin(bool inherit) const | |
4461 | { | |
4462 | /* An inode that is export pinned may not necessarily be a subtree root, we | |
4463 | * need to traverse the parents. A base or system inode cannot be pinned. | |
4464 | * N.B. inodes not yet linked into a dir (i.e. anonymous inodes) will not | |
4465 | * have a parent yet. | |
4466 | */ | |
4467 | for (const CInode *in = this; !in->is_base() && !in->is_system() && in->get_projected_parent_dn(); in = in->get_projected_parent_dn()->dir->inode) { | |
4468 | mds_rank_t pin = in->get_projected_inode()->export_pin; | |
4469 | if (pin >= 0) { | |
4470 | return pin; | |
4471 | } | |
4472 | if (!inherit) break; | |
4473 | } | |
4474 | return MDS_RANK_NONE; | |
4475 | } | |
4476 | ||
4477 | bool CInode::is_exportable(mds_rank_t dest) const | |
4478 | { | |
4479 | mds_rank_t pin = get_export_pin(); | |
4480 | if (pin == dest) { | |
4481 | return true; | |
4482 | } else if (pin >= 0) { | |
4483 | return false; | |
4484 | } else { | |
4485 | return true; | |
4486 | } | |
4487 | } |