1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2013 Inktank
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 */
15 #include <boost/program_options/variables_map.hpp>
16 #include <boost/program_options/parsers.hpp>
17 #include <boost/scoped_ptr.hpp>
18 #include <boost/optional.hpp>
22 #include "common/Formatter.h"
23 #include "common/errno.h"
24 #include "common/ceph_argparse.h"
26 #include "global/global_init.h"
28 #include "os/ObjectStore.h"
29 #include "os/filestore/FileJournal.h"
30 #include "os/filestore/FileStore.h"
32 #include "os/FuseStore.h"
35 #include "osd/PGLog.h"
38 #include "osd/ECUtil.h"
40 #include "json_spirit/json_spirit_value.h"
41 #include "json_spirit/json_spirit_reader.h"
43 #include "rebuild_mondb.h"
44 #include "ceph_objectstore_tool.h"
45 #include "include/compat.h"
46 #include "include/util.h"
// Shorthand alias for boost::program_options, used by the CLI argument
// parsing further down in this tool.
// NOTE(review): this file is a corrupted extraction — each original line is
// wrapped across several physical lines and carries a stray leading line
// number; recover the file from version control before building.
48 namespace po
= boost::program_options
;
// Build the CompatSet of OSD on-disk feature flags this tool understands.
// compat and ro_compat are left empty; only incompat features are inserted,
// mirroring the feature list an OSD writes into its superblock.
// NOTE(review): extraction artifact — the embedded original line numbers jump
// 64 -> 66 and 67 -> 69, so at least two original lines (apparently the
// preprocessor guards around the SNAPMAPPER/SHARDS inserts) and the closing
// brace of this function are missing from this listing. Do not edit the code
// in this state; restore it from version control.
52 CompatSet
get_test_compat_set() {
// Three feature sets; only the incompat set is populated below.
53 CompatSet::FeatureSet ceph_osd_feature_compat
;
54 CompatSet::FeatureSet ceph_osd_feature_ro_compat
;
55 CompatSet::FeatureSet ceph_osd_feature_incompat
;
56 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE
);
57 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_PGINFO
);
58 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_OLOC
);
59 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_LEC
);
60 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_CATEGORIES
);
61 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_HOBJECTPOOL
);
62 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_BIGINFO
);
63 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBINFO
);
64 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBLOG
);
// NOTE(review): original line 65 is absent here (numbering jumps 64 -> 66).
66 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_SNAPMAPPER
);
67 ceph_osd_feature_incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_SHARDS
);
// NOTE(review): original line 68 is absent here (numbering jumps 67 -> 69).
69 return CompatSet(ceph_osd_feature_compat
, ceph_osd_feature_ro_compat
,
70 ceph_osd_feature_incompat
);
// Chunk size (1 MiB) used when streaming object data in and out of the
// export/import file — see the read_fd/store->read call sites below.
74 const ssize_t max_read
= 1024 * 1024;
// Sentinel meaning "no export/import file descriptor has been opened yet";
// INT_MIN is used so that 0 (stdin/stdout) remains a valid descriptor.
75 const int fd_none
= INT_MIN
;
// Visitor interface: a callback invoked once per object by the
// action_on_all_objects* scan helpers below. Implementations (e.g.
// lookup_ghobject) receive the store, the collection, the object id and its
// decoded object_info_t.
// NOTE(review): the closing "};" of this struct (original line 82) is missing
// from this extracted listing.
79 struct action_on_object_t
{
80 virtual ~action_on_object_t() {}
// Pure virtual hook; return value is propagated as the scan's error code.
81 virtual int call(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, object_info_t
&oi
) = 0;
84 int _action_on_all_objects_in_pg(ObjectStore
*store
, coll_t coll
, action_on_object_t
&action
, bool debug
)
86 unsigned LIST_AT_A_TIME
= 100;
88 while (!next
.is_max()) {
89 vector
<ghobject_t
> list
;
90 int r
= store
->collection_list(
93 ghobject_t::get_max(),
98 cerr
<< "Error listing collection: " << coll
<< ", "
99 << cpp_strerror(r
) << std::endl
;
102 for (vector
<ghobject_t
>::iterator obj
= list
.begin();
105 if (obj
->is_pgmeta())
108 if (coll
!= coll_t::meta()) {
110 r
= store
->getattr(coll
, *obj
, OI_ATTR
, attr
);
112 cerr
<< "Error getting attr on : " << make_pair(coll
, *obj
) << ", "
113 << cpp_strerror(r
) << std::endl
;
116 bufferlist::iterator bp
= attr
.begin();
121 cerr
<< "Error getting attr on : " << make_pair(coll
, *obj
) << ", "
122 << cpp_strerror(r
) << std::endl
;
126 r
= action
.call(store
, coll
, *obj
, oi
);
134 int action_on_all_objects_in_pg(ObjectStore
*store
, string pgidstr
, action_on_object_t
&action
, bool debug
)
137 // Scan collections in case this is an ec pool but no shard specified
138 unsigned scanned
= 0;
140 vector
<coll_t
> colls_to_check
;
141 vector
<coll_t
> candidates
;
142 r
= store
->list_collections(candidates
);
144 cerr
<< "Error listing collections: " << cpp_strerror(r
) << std::endl
;
147 pgid
.parse(pgidstr
.c_str());
148 for (vector
<coll_t
>::iterator i
= candidates
.begin();
149 i
!= candidates
.end();
152 if (!i
->is_pg(&cand_pgid
))
155 // If an exact match or treat no shard as any shard
156 if (cand_pgid
== pgid
||
157 (pgid
.is_no_shard() && pgid
.pgid
== cand_pgid
.pgid
)) {
158 colls_to_check
.push_back(*i
);
163 cerr
<< colls_to_check
.size() << " pgs to scan" << std::endl
;
164 for (vector
<coll_t
>::iterator i
= colls_to_check
.begin();
165 i
!= colls_to_check
.end();
168 cerr
<< "Scanning " << *i
<< ", " << scanned
<< "/"
169 << colls_to_check
.size() << " completed" << std::endl
;
170 r
= _action_on_all_objects_in_pg(store
, *i
, action
, debug
);
177 int action_on_all_objects_in_exact_pg(ObjectStore
*store
, coll_t coll
, action_on_object_t
&action
, bool debug
)
179 int r
= _action_on_all_objects_in_pg(store
, coll
, action
, debug
);
183 int _action_on_all_objects(ObjectStore
*store
, action_on_object_t
&action
, bool debug
)
185 unsigned scanned
= 0;
187 vector
<coll_t
> colls_to_check
;
188 vector
<coll_t
> candidates
;
189 r
= store
->list_collections(candidates
);
191 cerr
<< "Error listing collections: " << cpp_strerror(r
) << std::endl
;
194 for (vector
<coll_t
>::iterator i
= candidates
.begin();
195 i
!= candidates
.end();
198 colls_to_check
.push_back(*i
);
203 cerr
<< colls_to_check
.size() << " pgs to scan" << std::endl
;
204 for (vector
<coll_t
>::iterator i
= colls_to_check
.begin();
205 i
!= colls_to_check
.end();
208 cerr
<< "Scanning " << *i
<< ", " << scanned
<< "/"
209 << colls_to_check
.size() << " completed" << std::endl
;
210 r
= _action_on_all_objects_in_pg(store
, *i
, action
, debug
);
217 int action_on_all_objects(ObjectStore
*store
, action_on_object_t
&action
, bool debug
)
219 int r
= _action_on_all_objects(store
, action
, debug
);
223 struct pgid_object_list
{
224 list
<pair
<coll_t
, ghobject_t
> > _objects
;
226 void insert(coll_t coll
, ghobject_t
&ghobj
) {
227 _objects
.push_back(make_pair(coll
, ghobj
));
230 void dump(Formatter
*f
, bool human_readable
) const {
232 f
->open_array_section("pgid_objects");
233 for (list
<pair
<coll_t
, ghobject_t
> >::const_iterator i
= _objects
.begin();
236 f
->open_array_section("pgid_object");
238 bool is_pg
= i
->first
.is_pg(&pgid
);
240 f
->dump_string("pgid", stringify(pgid
));
241 if (!is_pg
|| !human_readable
)
242 f
->dump_string("coll", i
->first
.to_str());
243 f
->open_object_section("ghobject");
247 if (human_readable
) {
252 if (!human_readable
) {
260 struct lookup_ghobject
: public action_on_object_t
{
261 pgid_object_list _objects
;
263 const boost::optional
<std::string
> _namespace
;
266 lookup_ghobject(const string
& name
, const boost::optional
<std::string
>& nspace
, bool need_snapset
= false) : _name(name
),
267 _namespace(nspace
), _need_snapset(need_snapset
) { }
269 int call(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, object_info_t
&oi
) override
{
270 if (_need_snapset
&& !ghobj
.hobj
.has_snapset())
272 if ((_name
.length() == 0 || ghobj
.hobj
.oid
.name
== _name
) &&
273 (!_namespace
|| ghobj
.hobj
.nspace
== _namespace
))
274 _objects
.insert(coll
, ghobj
);
279 return _objects
._objects
.size();
282 pair
<coll_t
, ghobject_t
> pop() {
283 pair
<coll_t
, ghobject_t
> front
= _objects
._objects
.front();
284 _objects
._objects
.pop_front();
288 void dump(Formatter
*f
, bool human_readable
) const {
289 _objects
.dump(f
, human_readable
);
// Well-known meta-collection object holding per-PG info (legacy layout).
293 ghobject_t infos_oid
= OSD::make_infos_oid();
// Filled in later for PGs whose info outgrew the infos object.
295 ghobject_t biginfo_oid
;
// Descriptor of the export/import file; fd_none (INT_MIN) until opened.
297 int file_fd
= fd_none
;
301 static int get_fd_data(int fd
, bufferlist
&bl
)
305 ssize_t bytes
= bl
.read_fd(fd
, max_read
);
307 cerr
<< "read_fd error " << cpp_strerror(bytes
) << std::endl
;
317 assert(bl
.length() == total
);
321 int get_log(ObjectStore
*fs
, __u8 struct_ver
,
322 coll_t coll
, spg_t pgid
, const pg_info_t
&info
,
323 PGLog::IndexedLog
&log
, pg_missing_t
&missing
)
327 assert(struct_ver
> 0);
328 PGLog::read_log_and_missing(fs
, coll
,
329 struct_ver
>= 8 ? coll
: coll_t::meta(),
330 struct_ver
>= 8 ? pgid
.make_pgmeta_oid() : log_oid
,
334 g_ceph_context
->_conf
->osd_ignore_stale_divergent_priors
);
335 if (debug
&& oss
.str().size())
336 cerr
<< oss
.str() << std::endl
;
338 catch (const buffer::error
&e
) {
339 cerr
<< "read_log_and_missing threw exception error " << e
.what() << std::endl
;
// Pretty-print a PG's log and missing set to `out` via the given Formatter,
// as an "op_log" object containing "pg_log_t" and "pg_missing_t" sections.
// NOTE(review): extraction artifact — original lines 347 (opening brace),
// 350 (presumably the log.dump(formatter) call; numbering jumps 349 -> 351)
// and 358 (closing brace) are missing from this listing. Restore from
// version control before editing.
345 void dump_log(Formatter
*formatter
, ostream
&out
, pg_log_t
&log
,
346 pg_missing_t
&missing
)
348 formatter
->open_object_section("op_log");
349 formatter
->open_object_section("pg_log_t");
351 formatter
->close_section();
352 formatter
->flush(out
);
353 formatter
->open_object_section("pg_missing_t");
354 missing
.dump(formatter
);
355 formatter
->close_section();
356 formatter
->close_section();
357 formatter
->flush(out
);
360 //Based on part of OSD::load_pgs()
361 int finish_remove_pgs(ObjectStore
*store
)
364 int r
= store
->list_collections(ls
);
366 cerr
<< "finish_remove_pgs: failed to list pgs: " << cpp_strerror(r
)
371 for (vector
<coll_t
>::iterator it
= ls
.begin();
376 if (it
->is_temp(&pgid
) ||
377 (it
->is_pg(&pgid
) && PG::_has_removal_flag(store
, pgid
))) {
378 cout
<< "finish_remove_pgs " << *it
<< " removing " << pgid
<< std::endl
;
379 OSD::recursive_remove_collection(g_ceph_context
, store
, pgid
, *it
);
383 //cout << "finish_remove_pgs ignoring unrecognized " << *it << std::endl;
388 #pragma GCC diagnostic ignored "-Wpragmas"
389 #pragma GCC diagnostic push
390 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
392 int mark_pg_for_removal(ObjectStore
*fs
, spg_t pgid
, ObjectStore::Transaction
*t
)
394 pg_info_t
info(pgid
);
396 ghobject_t
pgmeta_oid(info
.pgid
.make_pgmeta_oid());
399 epoch_t map_epoch
= 0;
400 int r
= PG::peek_map_epoch(fs
, pgid
, &map_epoch
, &bl
);
402 cerr
<< __func__
<< " warning: peek_map_epoch reported error" << std::endl
;
403 PastIntervals past_intervals
;
405 r
= PG::read_info(fs
, pgid
, coll
, bl
, info
, past_intervals
, struct_v
);
407 cerr
<< __func__
<< " error on read_info " << cpp_strerror(r
) << std::endl
;
410 assert(struct_v
>= 8);
412 cout
<< "setting '_remove' omap key" << std::endl
;
413 map
<string
,bufferlist
> values
;
414 ::encode((char)1, values
["_remove"]);
415 t
->omap_setkeys(coll
, pgmeta_oid
, values
);
419 #pragma GCC diagnostic pop
420 #pragma GCC diagnostic warning "-Wpragmas"
422 int initiate_new_remove_pg(ObjectStore
*store
, spg_t r_pgid
,
423 ObjectStore::Sequencer
&osr
)
426 finish_remove_pgs(store
);
427 if (!store
->collection_exists(coll_t(r_pgid
)))
430 cout
<< " marking collection for removal" << std::endl
;
433 ObjectStore::Transaction rmt
;
434 int r
= mark_pg_for_removal(store
, r_pgid
, &rmt
);
438 store
->apply_transaction(&osr
, std::move(rmt
));
439 finish_remove_pgs(store
);
443 int write_info(ObjectStore::Transaction
&t
, epoch_t epoch
, pg_info_t
&info
,
444 PastIntervals
&past_intervals
)
447 coll_t
coll(info
.pgid
);
448 ghobject_t
pgmeta_oid(info
.pgid
.make_pgmeta_oid());
449 map
<string
,bufferlist
> km
;
450 pg_info_t last_written_info
;
451 int ret
= PG::_prepare_write_info(
458 if (ret
) cerr
<< "Failed to write info" << std::endl
;
459 t
.omap_setkeys(coll
, pgmeta_oid
, km
);
463 typedef map
<eversion_t
, hobject_t
> divergent_priors_t
;
465 int write_pg(ObjectStore::Transaction
&t
, epoch_t epoch
, pg_info_t
&info
,
466 pg_log_t
&log
, PastIntervals
&past_intervals
,
467 divergent_priors_t
&divergent
,
468 pg_missing_t
&missing
)
470 int ret
= write_info(t
, epoch
, info
, past_intervals
);
473 coll_t
coll(info
.pgid
);
474 map
<string
,bufferlist
> km
;
476 if (!divergent
.empty()) {
477 assert(missing
.get_items().empty());
478 PGLog::write_log_and_missing_wo_missing(
479 t
, &km
, log
, coll
, info
.pgid
.make_pgmeta_oid(), divergent
, true);
481 pg_missing_tracker_t
tmissing(missing
);
482 bool rebuilt_missing_set_with_deletes
= missing
.may_include_deletes
;
483 PGLog::write_log_and_missing(
484 t
, &km
, log
, coll
, info
.pgid
.make_pgmeta_oid(), tmissing
, true,
485 &rebuilt_missing_set_with_deletes
);
487 t
.omap_setkeys(coll
, info
.pgid
.make_pgmeta_oid(), km
);
491 int do_trim_pg_log(ObjectStore
*store
, const coll_t
&coll
,
492 pg_info_t
&info
, const spg_t
&pgid
,
493 ObjectStore::Sequencer
&osr
, epoch_t map_epoch
,
494 PastIntervals
&past_intervals
)
496 ghobject_t oid
= pgid
.make_pgmeta_oid();
498 int r
= store
->stat(coll
, oid
, &st
);
500 assert(st
.st_size
== 0);
502 cerr
<< "Log bounds are: " << "(" << info
.log_tail
<< ","
503 << info
.last_update
<< "]" << std::endl
;
505 uint64_t max_entries
= g_ceph_context
->_conf
->osd_max_pg_log_entries
;
506 if (info
.last_update
.version
- info
.log_tail
.version
<= max_entries
) {
507 cerr
<< "Log not larger than osd_max_pg_log_entries " << max_entries
<< std::endl
;
511 assert(info
.last_update
.version
> max_entries
);
512 version_t trim_to
= info
.last_update
.version
- max_entries
;
513 size_t trim_at_once
= g_ceph_context
->_conf
->osd_pg_log_trim_max
;
518 // gather keys so we can delete them in a batch without
519 // affecting the iterator
520 set
<string
> keys_to_trim
;
522 ObjectMap::ObjectMapIterator p
= store
->get_omap_iterator(coll
, oid
);
525 for (p
->seek_to_first(); p
->valid(); p
->next(false)) {
526 if (p
->key()[0] == '_')
528 if (p
->key() == "can_rollback_to")
530 if (p
->key() == "divergent_priors")
532 if (p
->key() == "rollback_info_trimmed_to")
534 if (p
->key() == "may_include_deletes_in_missing")
536 if (p
->key().substr(0, 7) == string("missing"))
538 if (p
->key().substr(0, 4) == string("dup_"))
541 bufferlist bl
= p
->value();
542 bufferlist::iterator bp
= bl
.begin();
545 e
.decode_with_checksum(bp
);
546 } catch (const buffer::error
&e
) {
547 cerr
<< "Error reading pg log entry: " << e
<< std::endl
;
550 cerr
<< "read entry " << e
<< std::endl
;
552 if (e
.version
.version
> trim_to
) {
556 keys_to_trim
.insert(p
->key());
557 new_tail
= e
.version
;
558 if (keys_to_trim
.size() >= trim_at_once
)
564 } // deconstruct ObjectMapIterator
567 if (!dry_run
&& !keys_to_trim
.empty()) {
568 cout
<< "Removing keys " << *keys_to_trim
.begin() << " - " << *keys_to_trim
.rbegin() << std::endl
;
569 ObjectStore::Transaction t
;
570 t
.omap_rmkeys(coll
, oid
, keys_to_trim
);
571 int r
= store
->apply_transaction(&osr
, std::move(t
));
573 cerr
<< "Error trimming logs " << cpp_strerror(r
) << std::endl
;
578 // update pg info with new tail
579 if (!dry_run
&& new_tail
!= eversion_t()) {
580 info
.log_tail
= new_tail
;
581 ObjectStore::Transaction t
;
582 int ret
= write_info(t
, map_epoch
, info
, past_intervals
);
585 ret
= store
->apply_transaction(&osr
, std::move(t
));
587 cerr
<< "Error updating pg info " << cpp_strerror(ret
) << std::endl
;
591 // compact the db since we just removed a bunch of data
592 cerr
<< "Finished trimming, now compacting..." << std::endl
;
// Number of omap key/value pairs pulled per batch during export.
598 const int OMAP_BATCH_SIZE
= 25;
// Advance `iter` up to OMAP_BATCH_SIZE entries, copying each key/value pair
// into `oset`. Caller loops until the iterator is exhausted (see
// export_file's omap loop).
// NOTE(review): extraction artifact — original lines 600-601 (opening brace
// and, presumably, an oset.clear(); numbering jumps 599 -> 602) and the
// closing braces after line 603 are missing from this listing.
599 void get_omap_batch(ObjectMap::ObjectMapIterator
&iter
, map
<string
, bufferlist
> &oset
)
602 for (int count
= OMAP_BATCH_SIZE
; count
&& iter
->valid(); --count
, iter
->next()) {
603 oset
.insert(pair
<string
, bufferlist
>(iter
->key(), iter
->value()));
607 int ObjectStoreTool::export_file(ObjectStore
*store
, coll_t cid
, ghobject_t
&obj
)
613 int ret
= store
->stat(cid
, obj
, &st
);
617 cerr
<< "Read " << obj
<< std::endl
;
621 cerr
<< "size=" << total
<< std::endl
;
623 object_begin
objb(obj
);
628 ret
= store
->getattr(cid
, obj
, OI_ATTR
, bp
);
630 cerr
<< "getattr failure object_info " << ret
<< std::endl
;
636 cerr
<< "object_info: " << objb
.oi
<< std::endl
;
639 // NOTE: we include whiteouts, lost, etc.
641 ret
= write_section(TYPE_OBJECT_BEGIN
, objb
, file_fd
);
646 bufferlist rawdatabl
;
649 mysize_t len
= max_read
;
653 ret
= store
->read(cid
, obj
, offset
, len
, rawdatabl
);
659 data_section
dblock(offset
, len
, rawdatabl
);
661 cerr
<< "data section offset=" << offset
<< " len=" << len
<< std::endl
;
666 ret
= write_section(TYPE_DATA
, dblock
, file_fd
);
670 //Handle attrs for this object
671 map
<string
,bufferptr
> aset
;
672 ret
= store
->getattrs(cid
, obj
, aset
);
674 attr_section
as(aset
);
675 ret
= write_section(TYPE_ATTRS
, as
, file_fd
);
680 cerr
<< "attrs size " << aset
.size() << std::endl
;
683 //Handle omap information
685 ret
= store
->omap_get_header(cid
, obj
, &hdrbuf
, true);
687 cerr
<< "omap_get_header: " << cpp_strerror(ret
) << std::endl
;
691 omap_hdr_section
ohs(hdrbuf
);
692 ret
= write_section(TYPE_OMAP_HDR
, ohs
, file_fd
);
696 ObjectMap::ObjectMapIterator iter
= store
->get_omap_iterator(cid
, obj
);
699 cerr
<< "omap_get_iterator: " << cpp_strerror(ret
) << std::endl
;
702 iter
->seek_to_first();
704 map
<string
, bufferlist
> out
;
705 while(iter
->valid()) {
706 get_omap_batch(iter
, out
);
708 if (out
.empty()) break;
710 mapcount
+= out
.size();
711 omap_section
oms(out
);
712 ret
= write_section(TYPE_OMAP
, oms
, file_fd
);
717 cerr
<< "omap map size " << mapcount
<< std::endl
;
719 ret
= write_simple(TYPE_OBJECT_END
, file_fd
);
726 int ObjectStoreTool::export_files(ObjectStore
*store
, coll_t coll
)
730 while (!next
.is_max()) {
731 vector
<ghobject_t
> objects
;
732 int r
= store
->collection_list(coll
, next
, ghobject_t::get_max(), 300,
736 for (vector
<ghobject_t
>::iterator i
= objects
.begin();
739 assert(!i
->hobj
.is_meta());
740 if (i
->is_pgmeta() || i
->hobj
.is_temp()) {
743 r
= export_file(store
, coll
, *i
);
751 int set_inc_osdmap(ObjectStore
*store
, epoch_t e
, bufferlist
& bl
, bool force
,
752 ObjectStore::Sequencer
&osr
) {
753 OSDMap::Incremental inc
;
754 bufferlist::iterator it
= bl
.begin();
758 } else if (e
!= inc
.epoch
) {
759 cerr
<< "incremental.epoch mismatch: "
760 << inc
.epoch
<< " != " << e
<< std::endl
;
762 cerr
<< "But will continue anyway." << std::endl
;
767 const ghobject_t inc_oid
= OSD::get_inc_osdmap_pobject_name(e
);
768 if (!store
->exists(coll_t::meta(), inc_oid
)) {
769 cerr
<< "inc-osdmap (" << inc_oid
<< ") does not exist." << std::endl
;
773 cout
<< "Creating a new epoch." << std::endl
;
777 ObjectStore::Transaction t
;
778 t
.write(coll_t::meta(), inc_oid
, 0, bl
.length(), bl
);
779 t
.truncate(coll_t::meta(), inc_oid
, bl
.length());
780 int ret
= store
->apply_transaction(&osr
, std::move(t
));
782 cerr
<< "Failed to set inc-osdmap (" << inc_oid
<< "): " << ret
<< std::endl
;
784 cout
<< "Wrote inc-osdmap." << inc
.epoch
<< std::endl
;
789 int get_inc_osdmap(ObjectStore
*store
, epoch_t e
, bufferlist
& bl
)
791 if (store
->read(coll_t::meta(),
792 OSD::get_inc_osdmap_pobject_name(e
),
799 int set_osdmap(ObjectStore
*store
, epoch_t e
, bufferlist
& bl
, bool force
,
800 ObjectStore::Sequencer
&osr
) {
804 e
= osdmap
.get_epoch();
805 } else if (e
!= osdmap
.get_epoch()) {
806 cerr
<< "osdmap.epoch mismatch: "
807 << e
<< " != " << osdmap
.get_epoch() << std::endl
;
809 cerr
<< "But will continue anyway." << std::endl
;
814 const ghobject_t full_oid
= OSD::get_osdmap_pobject_name(e
);
815 if (!store
->exists(coll_t::meta(), full_oid
)) {
816 cerr
<< "osdmap (" << full_oid
<< ") does not exist." << std::endl
;
820 cout
<< "Creating a new epoch." << std::endl
;
824 ObjectStore::Transaction t
;
825 t
.write(coll_t::meta(), full_oid
, 0, bl
.length(), bl
);
826 t
.truncate(coll_t::meta(), full_oid
, bl
.length());
827 int ret
= store
->apply_transaction(&osr
, std::move(t
));
829 cerr
<< "Failed to set osdmap (" << full_oid
<< "): " << ret
<< std::endl
;
831 cout
<< "Wrote osdmap." << osdmap
.get_epoch() << std::endl
;
836 int get_osdmap(ObjectStore
*store
, epoch_t e
, OSDMap
&osdmap
, bufferlist
& bl
)
838 bool found
= store
->read(
839 coll_t::meta(), OSD::get_osdmap_pobject_name(e
), 0, 0, bl
) >= 0;
841 cerr
<< "Can't find OSDMap for pg epoch " << e
<< std::endl
;
846 cerr
<< osdmap
<< std::endl
;
850 int add_osdmap(ObjectStore
*store
, metadata_section
&ms
)
852 return get_osdmap(store
, ms
.map_epoch
, ms
.osdmap
, ms
.osdmap_bl
);
855 int ObjectStoreTool::do_export(ObjectStore
*fs
, coll_t coll
, spg_t pgid
,
856 pg_info_t
&info
, epoch_t map_epoch
, __u8 struct_ver
,
857 const OSDSuperblock
& superblock
,
858 PastIntervals
&past_intervals
)
860 PGLog::IndexedLog log
;
861 pg_missing_t missing
;
863 cerr
<< "Exporting " << pgid
<< std::endl
;
865 int ret
= get_log(fs
, struct_ver
, coll
, pgid
, info
, log
, missing
);
870 Formatter
*formatter
= Formatter::create("json-pretty");
872 dump_log(formatter
, cerr
, log
, missing
);
877 pg_begin
pgb(pgid
, superblock
);
878 // Special case: If replicated pg don't require the importing OSD to have shard feature
879 if (pgid
.is_no_shard()) {
880 pgb
.superblock
.compat_features
.incompat
.remove(CEPH_OSD_FEATURE_INCOMPAT_SHARDS
);
882 ret
= write_section(TYPE_PG_BEGIN
, pgb
, file_fd
);
886 // The metadata_section is now before files, so import can detect
887 // errors and abort without wasting time.
895 ret
= add_osdmap(fs
, ms
);
898 ret
= write_section(TYPE_PG_METADATA
, ms
, file_fd
);
902 ret
= export_files(fs
, coll
);
904 cerr
<< "export_files error " << ret
<< std::endl
;
908 ret
= write_simple(TYPE_PG_END
, file_fd
);
915 int dump_data(Formatter
*formatter
, bufferlist
&bl
)
917 bufferlist::iterator ebliter
= bl
.begin();
921 formatter
->open_object_section("data_block");
922 formatter
->dump_unsigned("offset", ds
.offset
);
923 formatter
->dump_unsigned("len", ds
.len
);
924 // XXX: Add option to dump data like od -cx ?
925 formatter
->close_section();
926 formatter
->flush(cout
);
930 int get_data(ObjectStore
*store
, coll_t coll
, ghobject_t hoid
,
931 ObjectStore::Transaction
*t
, bufferlist
&bl
)
933 bufferlist::iterator ebliter
= bl
.begin();
938 cerr
<< "\tdata: offset " << ds
.offset
<< " len " << ds
.len
<< std::endl
;
939 t
->write(coll
, hoid
, ds
.offset
, ds
.len
, ds
.databl
);
944 Formatter
*formatter
, ghobject_t hoid
,
947 bufferlist::iterator ebliter
= bl
.begin();
951 // This could have been handled in the caller if we didn't need to
952 // support exports that didn't include object_info_t in object_begin.
953 if (hoid
.generation
== ghobject_t::NO_GEN
&&
954 hoid
.hobj
.is_head()) {
955 map
<string
,bufferlist
>::iterator mi
= as
.data
.find(SS_ATTR
);
956 if (mi
!= as
.data
.end()) {
958 auto p
= mi
->second
.begin();
960 formatter
->open_object_section("snapset");
961 snapset
.dump(formatter
);
962 formatter
->close_section();
964 formatter
->open_object_section("snapset");
965 formatter
->dump_string("error", "missing SS_ATTR");
966 formatter
->close_section();
970 formatter
->open_object_section("attrs");
971 formatter
->open_array_section("user");
972 for (auto kv
: as
.data
) {
973 // Skip system attributes
974 if (('_' != kv
.first
.at(0)) || kv
.first
.size() == 1)
976 formatter
->open_object_section("user_attr");
977 formatter
->dump_string("name", kv
.first
.substr(1));
979 formatter
->dump_string("value", cleanbin(kv
.second
, b64
));
980 formatter
->dump_bool("Base64", b64
);
981 formatter
->close_section();
983 formatter
->close_section();
984 formatter
->open_array_section("system");
985 for (auto kv
: as
.data
) {
986 // Skip user attributes
987 if (('_' == kv
.first
.at(0)) && kv
.first
.size() != 1)
989 formatter
->open_object_section("sys_attr");
990 formatter
->dump_string("name", kv
.first
);
991 formatter
->close_section();
993 formatter
->close_section();
994 formatter
->close_section();
995 formatter
->flush(cout
);
1001 ObjectStore
*store
, coll_t coll
, ghobject_t hoid
,
1002 ObjectStore::Transaction
*t
, bufferlist
&bl
,
1003 OSDriver
&driver
, SnapMapper
&snap_mapper
)
1005 bufferlist::iterator ebliter
= bl
.begin();
1010 cerr
<< "\tattrs: len " << as
.data
.size() << std::endl
;
1011 t
->setattrs(coll
, hoid
, as
.data
);
1013 // This could have been handled in the caller if we didn't need to
1014 // support exports that didn't include object_info_t in object_begin.
1015 if (hoid
.generation
== ghobject_t::NO_GEN
) {
1016 if (hoid
.hobj
.snap
< CEPH_MAXSNAP
) {
1017 map
<string
,bufferlist
>::iterator mi
= as
.data
.find(OI_ATTR
);
1018 if (mi
!= as
.data
.end()) {
1019 object_info_t
oi(mi
->second
);
1022 cerr
<< "object_info " << oi
<< std::endl
;
1024 OSDriver::OSTransaction
_t(driver
.get_transaction(t
));
1025 set
<snapid_t
> oi_snaps(oi
.legacy_snaps
.begin(), oi
.legacy_snaps
.end());
1026 if (!oi_snaps
.empty()) {
1028 cerr
<< "\tsetting legacy snaps " << oi_snaps
<< std::endl
;
1029 snap_mapper
.add_oid(hoid
.hobj
, oi_snaps
, &_t
);
1033 if (hoid
.hobj
.is_head()) {
1034 map
<string
,bufferlist
>::iterator mi
= as
.data
.find(SS_ATTR
);
1035 if (mi
!= as
.data
.end()) {
1037 auto p
= mi
->second
.begin();
1039 cout
<< "snapset " << snapset
<< std::endl
;
1040 if (!snapset
.is_legacy()) {
1041 for (auto& p
: snapset
.clone_snaps
) {
1042 ghobject_t clone
= hoid
;
1043 clone
.hobj
.snap
= p
.first
;
1044 set
<snapid_t
> snaps(p
.second
.begin(), p
.second
.end());
1045 if (!store
->exists(coll
, clone
)) {
1046 // no clone, skip. this is probably a cache pool. this works
1047 // because we use a separate transaction per object and clones
1048 // come before head in the archive.
1050 cerr
<< "\tskipping missing " << clone
<< " (snaps "
1051 << snaps
<< ")" << std::endl
;
1055 cerr
<< "\tsetting " << clone
.hobj
<< " snaps " << snaps
1057 OSDriver::OSTransaction
_t(driver
.get_transaction(t
));
1058 assert(!snaps
.empty());
1059 snap_mapper
.add_oid(clone
.hobj
, snaps
, &_t
);
1063 cerr
<< "missing SS_ATTR on " << hoid
<< std::endl
;
1072 int dump_omap_hdr(Formatter
*formatter
, bufferlist
&bl
)
1074 bufferlist::iterator ebliter
= bl
.begin();
1075 omap_hdr_section oh
;
1078 formatter
->open_object_section("omap_header");
1079 formatter
->dump_string("value", string(oh
.hdr
.c_str(), oh
.hdr
.length()));
1080 formatter
->close_section();
1081 formatter
->flush(cout
);
1085 int get_omap_hdr(ObjectStore
*store
, coll_t coll
, ghobject_t hoid
,
1086 ObjectStore::Transaction
*t
, bufferlist
&bl
)
1088 bufferlist::iterator ebliter
= bl
.begin();
1089 omap_hdr_section oh
;
1093 cerr
<< "\tomap header: " << string(oh
.hdr
.c_str(), oh
.hdr
.length())
1095 t
->omap_setheader(coll
, hoid
, oh
.hdr
);
1099 int dump_omap(Formatter
*formatter
, bufferlist
&bl
)
1101 bufferlist::iterator ebliter
= bl
.begin();
1105 formatter
->open_object_section("omaps");
1106 formatter
->dump_unsigned("count", os
.omap
.size());
1107 formatter
->open_array_section("data");
1108 for (auto o
: os
.omap
) {
1109 formatter
->open_object_section("omap");
1110 formatter
->dump_string("name", o
.first
);
1112 formatter
->dump_string("value", cleanbin(o
.second
, b64
));
1113 formatter
->dump_bool("Base64", b64
);
1114 formatter
->close_section();
1116 formatter
->close_section();
1117 formatter
->close_section();
1118 formatter
->flush(cout
);
1122 int get_omap(ObjectStore
*store
, coll_t coll
, ghobject_t hoid
,
1123 ObjectStore::Transaction
*t
, bufferlist
&bl
)
1125 bufferlist::iterator ebliter
= bl
.begin();
1130 cerr
<< "\tomap: size " << os
.omap
.size() << std::endl
;
1131 t
->omap_setkeys(coll
, hoid
, os
.omap
);
1135 int ObjectStoreTool::dump_object(Formatter
*formatter
,
1138 bufferlist::iterator ebliter
= bl
.begin();
1142 if (ob
.hoid
.hobj
.is_temp()) {
1143 cerr
<< "ERROR: Export contains temporary object '" << ob
.hoid
<< "'" << std::endl
;
1147 formatter
->open_object_section("object");
1148 formatter
->open_object_section("oid");
1149 ob
.hoid
.dump(formatter
);
1150 formatter
->close_section();
1151 formatter
->open_object_section("object_info");
1152 ob
.oi
.dump(formatter
);
1153 formatter
->close_section();
1159 int ret
= read_section(&type
, &ebl
);
1163 //cout << "\tdo_object: Section type " << hex << type << dec << std::endl;
1164 //cout << "\t\tsection size " << ebl.length() << std::endl;
1165 if (type
>= END_OF_TYPES
) {
1166 cout
<< "Skipping unknown object section type" << std::endl
;
1172 ret
= dump_data(formatter
, ebl
);
1173 if (ret
) return ret
;
1177 ret
= dump_attrs(formatter
, ob
.hoid
, ebl
);
1178 if (ret
) return ret
;
1182 ret
= dump_omap_hdr(formatter
, ebl
);
1183 if (ret
) return ret
;
1187 ret
= dump_omap(formatter
, ebl
);
1188 if (ret
) return ret
;
1190 case TYPE_OBJECT_END
:
1194 cerr
<< "Unknown section type " << type
<< std::endl
;
1198 formatter
->close_section();
1202 int ObjectStoreTool::get_object(ObjectStore
*store
, coll_t coll
,
1203 bufferlist
&bl
, OSDMap
&curmap
,
1204 bool *skipped_objects
,
1205 ObjectStore::Sequencer
&osr
)
1207 ObjectStore::Transaction tran
;
1208 ObjectStore::Transaction
*t
= &tran
;
1209 bufferlist::iterator ebliter
= bl
.begin();
1215 OSD::make_snapmapper_oid());
1217 coll
.is_pg_prefix(&pg
);
1218 SnapMapper
mapper(g_ceph_context
, &driver
, 0, 0, 0, pg
.shard
);
1220 if (ob
.hoid
.hobj
.is_temp()) {
1221 cerr
<< "ERROR: Export contains temporary object '" << ob
.hoid
<< "'" << std::endl
;
1224 assert(g_ceph_context
);
1225 if (ob
.hoid
.hobj
.nspace
!= g_ceph_context
->_conf
->osd_hit_set_namespace
) {
1226 object_t oid
= ob
.hoid
.hobj
.oid
;
1227 object_locator_t
loc(ob
.hoid
.hobj
);
1228 pg_t raw_pgid
= curmap
.object_locator_to_pg(oid
, loc
);
1229 pg_t pgid
= curmap
.raw_pg_to_pg(raw_pgid
);
1232 if (coll
.is_pg(&coll_pgid
) == false) {
1233 cerr
<< "INTERNAL ERROR: Bad collection during import" << std::endl
;
1236 if (coll_pgid
.shard
!= ob
.hoid
.shard_id
) {
1237 cerr
<< "INTERNAL ERROR: Importing shard " << coll_pgid
.shard
1238 << " but object shard is " << ob
.hoid
.shard_id
<< std::endl
;
1242 if (coll_pgid
.pgid
!= pgid
) {
1243 cerr
<< "Skipping object '" << ob
.hoid
<< "' which belongs in pg " << pgid
<< std::endl
;
1244 *skipped_objects
= true;
1251 t
->touch(coll
, ob
.hoid
);
1253 cout
<< "Write " << ob
.hoid
<< std::endl
;
1259 int ret
= read_section(&type
, &ebl
);
1263 //cout << "\tdo_object: Section type " << hex << type << dec << std::endl;
1264 //cout << "\t\tsection size " << ebl.length() << std::endl;
1265 if (type
>= END_OF_TYPES
) {
1266 cout
<< "Skipping unknown object section type" << std::endl
;
1272 ret
= get_data(store
, coll
, ob
.hoid
, t
, ebl
);
1273 if (ret
) return ret
;
1277 ret
= get_attrs(store
, coll
, ob
.hoid
, t
, ebl
, driver
, mapper
);
1278 if (ret
) return ret
;
1282 ret
= get_omap_hdr(store
, coll
, ob
.hoid
, t
, ebl
);
1283 if (ret
) return ret
;
1287 ret
= get_omap(store
, coll
, ob
.hoid
, t
, ebl
);
1288 if (ret
) return ret
;
1290 case TYPE_OBJECT_END
:
1294 cerr
<< "Unknown section type " << type
<< std::endl
;
1299 store
->apply_transaction(&osr
, std::move(*t
));
1303 int dump_pg_metadata(Formatter
*formatter
, bufferlist
&bl
, metadata_section
&ms
)
1305 bufferlist::iterator ebliter
= bl
.begin();
1308 formatter
->open_object_section("metadata_section");
1310 formatter
->dump_unsigned("pg_disk_version", (int)ms
.struct_ver
);
1311 formatter
->dump_unsigned("map_epoch", ms
.map_epoch
);
1313 formatter
->open_object_section("OSDMap");
1314 ms
.osdmap
.dump(formatter
);
1315 formatter
->close_section();
1316 formatter
->flush(cout
);
1319 formatter
->open_object_section("info");
1320 ms
.info
.dump(formatter
);
1321 formatter
->close_section();
1322 formatter
->flush(cout
);
1324 formatter
->open_object_section("log");
1325 ms
.log
.dump(formatter
);
1326 formatter
->close_section();
1327 formatter
->flush(cout
);
1329 formatter
->open_object_section("pg_missing_t");
1330 ms
.missing
.dump(formatter
);
1331 formatter
->close_section();
1333 // XXX: ms.past_intervals?
1335 formatter
->close_section();
1336 formatter
->flush(cout
);
1338 if (ms
.osdmap
.get_epoch() != 0 && ms
.map_epoch
!= ms
.osdmap
.get_epoch()) {
1339 cerr
<< "FATAL: Invalid OSDMap epoch in export data" << std::endl
;
1346 int get_pg_metadata(ObjectStore
*store
, bufferlist
&bl
, metadata_section
&ms
,
1347 const OSDSuperblock
& sb
, OSDMap
& curmap
, spg_t pgid
)
1349 bufferlist::iterator ebliter
= bl
.begin();
1351 spg_t old_pgid
= ms
.info
.pgid
;
1352 ms
.info
.pgid
= pgid
;
1355 Formatter
*formatter
= new JSONFormatter(true);
1356 cout
<< "export pgid " << old_pgid
<< std::endl
;
1357 cout
<< "struct_v " << (int)ms
.struct_ver
<< std::endl
;
1358 cout
<< "map epoch " << ms
.map_epoch
<< std::endl
;
1360 formatter
->open_object_section("importing OSDMap");
1361 ms
.osdmap
.dump(formatter
);
1362 formatter
->close_section();
1363 formatter
->flush(cout
);
1366 cout
<< "osd current epoch " << sb
.current_epoch
<< std::endl
;
1367 formatter
->open_object_section("current OSDMap");
1368 curmap
.dump(formatter
);
1369 formatter
->close_section();
1370 formatter
->flush(cout
);
1373 formatter
->open_object_section("info");
1374 ms
.info
.dump(formatter
);
1375 formatter
->close_section();
1376 formatter
->flush(cout
);
1379 formatter
->open_object_section("log");
1380 ms
.log
.dump(formatter
);
1381 formatter
->close_section();
1382 formatter
->flush(cout
);
1385 formatter
->flush(cout
);
1389 if (ms
.osdmap
.get_epoch() != 0 && ms
.map_epoch
!= ms
.osdmap
.get_epoch()) {
1390 cerr
<< "FATAL: Invalid OSDMap epoch in export data" << std::endl
;
1394 if (ms
.map_epoch
> sb
.current_epoch
) {
1395 cerr
<< "ERROR: Export PG's map_epoch " << ms
.map_epoch
<< " > OSD's epoch " << sb
.current_epoch
<< std::endl
;
1396 cerr
<< "The OSD you are using is older than the exported PG" << std::endl
;
1397 cerr
<< "Either use another OSD or join selected OSD to cluster to update it first" << std::endl
;
1401 // Pool verified to exist for call to get_pg_num().
1402 unsigned new_pg_num
= curmap
.get_pg_num(pgid
.pgid
.pool());
1404 if (pgid
.pgid
.ps() >= new_pg_num
) {
1405 cerr
<< "Illegal pgid, the seed is larger than current pg_num" << std::endl
;
1409 // Old exports didn't include OSDMap, see if we have a copy locally
1410 if (ms
.osdmap
.get_epoch() == 0) {
1412 bufferlist findmap_bl
;
1413 int ret
= get_osdmap(store
, ms
.map_epoch
, findmap
, findmap_bl
);
1415 ms
.osdmap
.deepish_copy_from(findmap
);
1417 cerr
<< "WARNING: No OSDMap in old export,"
1418 " some objects may be ignored due to a split" << std::endl
;
1422 // Make sure old_pg_num is 0 in the unusual case that OSDMap not in export
1423 // nor can we find a local copy.
1424 unsigned old_pg_num
= 0;
1425 if (ms
.osdmap
.get_epoch() != 0)
1426 old_pg_num
= ms
.osdmap
.get_pg_num(pgid
.pgid
.pool());
1429 cerr
<< "old_pg_num " << old_pg_num
<< std::endl
;
1430 cerr
<< "new_pg_num " << new_pg_num
<< std::endl
;
1431 cerr
<< ms
.osdmap
<< std::endl
;
1432 cerr
<< curmap
<< std::endl
;
1435 // If we have managed to have a good OSDMap we can do these checks
1437 if (old_pgid
.pgid
.ps() >= old_pg_num
) {
1438 cerr
<< "FATAL: pgid invalid for original map epoch" << std::endl
;
1441 if (pgid
.pgid
.ps() >= old_pg_num
) {
1442 cout
<< "NOTICE: Post split pgid specified" << std::endl
;
1445 if (parent
.is_split(old_pg_num
, new_pg_num
, NULL
)) {
1446 cerr
<< "WARNING: Split occurred, some objects may be ignored" << std::endl
;
1452 cerr
<< "Import pgid " << ms
.info
.pgid
<< std::endl
;
1453 cerr
<< "Previous past_intervals " << ms
.past_intervals
<< std::endl
;
1454 cerr
<< "history.same_interval_since " << ms
.info
.history
.same_interval_since
<< std::endl
;
1457 // Let osd recompute past_intervals and same_interval_since
1458 ms
.past_intervals
.clear();
1459 ms
.info
.history
.same_interval_since
= 0;
1462 cerr
<< "Changing pg epoch " << ms
.map_epoch
<< " to " << sb
.current_epoch
<< std::endl
;
1464 ms
.map_epoch
= sb
.current_epoch
;
1469 // out: pg_log_t that only has entries that apply to import_pgid using curmap
1470 // reject: Entries rejected from "in" are in the reject.log. Other fields not set.
1471 void filter_divergent_priors(spg_t import_pgid
, const OSDMap
&curmap
,
1472 const string
&hit_set_namespace
, const divergent_priors_t
&in
,
1473 divergent_priors_t
&out
, divergent_priors_t
&reject
)
1478 for (divergent_priors_t::const_iterator i
= in
.begin();
1479 i
!= in
.end(); ++i
) {
1481 // Reject divergent priors for temporary objects
1482 if (i
->second
.is_temp()) {
1487 if (i
->second
.nspace
!= hit_set_namespace
) {
1488 object_t oid
= i
->second
.oid
;
1489 object_locator_t
loc(i
->second
);
1490 pg_t raw_pgid
= curmap
.object_locator_to_pg(oid
, loc
);
1491 pg_t pgid
= curmap
.raw_pg_to_pg(raw_pgid
);
1493 if (import_pgid
.pgid
== pgid
) {
1504 int ObjectStoreTool::dump_import(Formatter
*formatter
)
1508 PGLog::IndexedLog log
;
1509 //bool skipped_objects = false;
1511 int ret
= read_super();
1515 if (sh
.magic
!= super_header::super_magic
) {
1516 cerr
<< "Invalid magic number" << std::endl
;
1520 if (sh
.version
> super_header::super_ver
) {
1521 cerr
<< "Can't handle export format version=" << sh
.version
<< std::endl
;
1525 formatter
->open_object_section("Export");
1527 //First section must be TYPE_PG_BEGIN
1529 ret
= read_section(&type
, &ebl
);
1532 if (type
== TYPE_POOL_BEGIN
) {
1533 cerr
<< "Dump of pool exports not supported" << std::endl
;
1535 } else if (type
!= TYPE_PG_BEGIN
) {
1536 cerr
<< "Invalid first section type " << std::to_string(type
) << std::endl
;
1540 bufferlist::iterator ebliter
= ebl
.begin();
1542 pgb
.decode(ebliter
);
1543 spg_t pgid
= pgb
.pgid
;
1545 formatter
->dump_string("pgid", stringify(pgid
));
1546 formatter
->dump_string("cluster_fsid", stringify(pgb
.superblock
.cluster_fsid
));
1547 formatter
->dump_string("features", stringify(pgb
.superblock
.compat_features
));
1550 bool found_metadata
= false;
1551 metadata_section ms
;
1552 bool objects_started
= false;
1554 ret
= read_section(&type
, &ebl
);
1559 cerr
<< "dump_import: Section type " << std::to_string(type
) << std::endl
;
1561 if (type
>= END_OF_TYPES
) {
1562 cerr
<< "Skipping unknown section type" << std::endl
;
1566 case TYPE_OBJECT_BEGIN
:
1567 if (!objects_started
) {
1568 formatter
->open_array_section("objects");
1569 objects_started
= true;
1571 ret
= dump_object(formatter
, ebl
);
1572 if (ret
) return ret
;
1574 case TYPE_PG_METADATA
:
1575 if (objects_started
)
1576 cerr
<< "WARNING: metadata_section out of order" << std::endl
;
1577 ret
= dump_pg_metadata(formatter
, ebl
, ms
);
1578 if (ret
) return ret
;
1579 found_metadata
= true;
1582 if (objects_started
) {
1583 formatter
->close_section();
1588 cerr
<< "Unknown section type " << std::to_string(type
) << std::endl
;
1593 if (!found_metadata
) {
1594 cerr
<< "Missing metadata section" << std::endl
;
1598 formatter
->close_section();
1599 formatter
->flush(cout
);
1604 int ObjectStoreTool::do_import(ObjectStore
*store
, OSDSuperblock
& sb
,
1605 bool force
, std::string pgidstr
,
1606 ObjectStore::Sequencer
&osr
)
1610 PGLog::IndexedLog log
;
1611 bool skipped_objects
= false;
1614 finish_remove_pgs(store
);
1616 int ret
= read_super();
1620 if (sh
.magic
!= super_header::super_magic
) {
1621 cerr
<< "Invalid magic number" << std::endl
;
1625 if (sh
.version
> super_header::super_ver
) {
1626 cerr
<< "Can't handle export format version=" << sh
.version
<< std::endl
;
1630 //First section must be TYPE_PG_BEGIN
1632 ret
= read_section(&type
, &ebl
);
1635 if (type
== TYPE_POOL_BEGIN
) {
1636 cerr
<< "Pool exports cannot be imported into a PG" << std::endl
;
1638 } else if (type
!= TYPE_PG_BEGIN
) {
1639 cerr
<< "Invalid first section type " << std::to_string(type
) << std::endl
;
1643 bufferlist::iterator ebliter
= ebl
.begin();
1645 pgb
.decode(ebliter
);
1646 spg_t pgid
= pgb
.pgid
;
1647 spg_t orig_pgid
= pgid
;
1649 if (pgidstr
.length()) {
1652 bool ok
= user_pgid
.parse(pgidstr
.c_str());
1653 // This succeeded in main() already
1655 if (pgid
!= user_pgid
) {
1656 if (pgid
.pool() != user_pgid
.pool()) {
1657 cerr
<< "Can't specify a different pgid pool, must be " << pgid
.pool() << std::endl
;
1660 if (pgid
.is_no_shard() && !user_pgid
.is_no_shard()) {
1661 cerr
<< "Can't specify a sharded pgid with a non-sharded export" << std::endl
;
1664 // Get shard from export information if not specified
1665 if (!pgid
.is_no_shard() && user_pgid
.is_no_shard()) {
1666 user_pgid
.shard
= pgid
.shard
;
1668 if (pgid
.shard
!= user_pgid
.shard
) {
1669 cerr
<< "Can't specify a different shard, must be " << pgid
.shard
<< std::endl
;
1676 if (!pgb
.superblock
.cluster_fsid
.is_zero()
1677 && pgb
.superblock
.cluster_fsid
!= sb
.cluster_fsid
) {
1678 cerr
<< "Export came from different cluster with fsid "
1679 << pgb
.superblock
.cluster_fsid
<< std::endl
;
1684 cerr
<< "Exported features: " << pgb
.superblock
.compat_features
<< std::endl
;
1687 // Special case: Old export has SHARDS incompat feature on replicated pg, remove it
1688 if (pgid
.is_no_shard())
1689 pgb
.superblock
.compat_features
.incompat
.remove(CEPH_OSD_FEATURE_INCOMPAT_SHARDS
);
1691 if (sb
.compat_features
.compare(pgb
.superblock
.compat_features
) == -1) {
1692 CompatSet unsupported
= sb
.compat_features
.unsupported(pgb
.superblock
.compat_features
);
1694 cerr
<< "Export has incompatible features set " << unsupported
<< std::endl
;
1696 // Let them import if they specify the --force option
1698 return 11; // Positive return means exit status
1701 // Don't import if pool no longer exists
1704 ret
= get_osdmap(store
, sb
.current_epoch
, curmap
, bl
);
1706 cerr
<< "Can't find local OSDMap" << std::endl
;
1709 if (!curmap
.have_pg_pool(pgid
.pgid
.m_pool
)) {
1710 cerr
<< "Pool " << pgid
.pgid
.m_pool
<< " no longer exists" << std::endl
;
1711 // Special exit code for this error, used by test code
1712 return 10; // Positive return means exit status
1715 ghobject_t pgmeta_oid
= pgid
.make_pgmeta_oid();
1716 log_oid
= OSD::make_pg_log_oid(pgid
);
1717 biginfo_oid
= OSD::make_pg_biginfo_oid(pgid
);
1719 //Check for PG already present.
1721 if (store
->collection_exists(coll
)) {
1722 cerr
<< "pgid " << pgid
<< " already exists" << std::endl
;
1727 ObjectStore::Transaction t
;
1728 PG::_create(t
, pgid
,
1729 pgid
.get_split_bits(curmap
.get_pg_pool(pgid
.pool())->get_pg_num()));
1730 PG::_init(t
, pgid
, NULL
);
1732 // mark this coll for removal until we're done
1733 map
<string
,bufferlist
> values
;
1734 ::encode((char)1, values
["_remove"]);
1735 t
.omap_setkeys(coll
, pgid
.make_pgmeta_oid(), values
);
1737 store
->apply_transaction(&osr
, std::move(t
));
1740 cout
<< "Importing pgid " << pgid
;
1741 if (orig_pgid
!= pgid
) {
1742 cout
<< " exported as " << orig_pgid
;
1747 bool found_metadata
= false;
1748 metadata_section ms
;
1750 ret
= read_section(&type
, &ebl
);
1755 cout
<< __func__
<< ": Section type " << std::to_string(type
) << std::endl
;
1757 if (type
>= END_OF_TYPES
) {
1758 cout
<< "Skipping unknown section type" << std::endl
;
1762 case TYPE_OBJECT_BEGIN
:
1763 ret
= get_object(store
, coll
, ebl
, curmap
, &skipped_objects
, osr
);
1764 if (ret
) return ret
;
1766 case TYPE_PG_METADATA
:
1767 ret
= get_pg_metadata(store
, ebl
, ms
, sb
, curmap
, pgid
);
1768 if (ret
) return ret
;
1769 found_metadata
= true;
1775 cerr
<< "Unknown section type " << std::to_string(type
) << std::endl
;
1780 if (!found_metadata
) {
1781 cerr
<< "Missing metadata section" << std::endl
;
1785 ObjectStore::Transaction t
;
1787 pg_log_t newlog
, reject
;
1788 pg_log_t::filter_log(pgid
, curmap
, g_ceph_context
->_conf
->osd_hit_set_namespace
,
1789 ms
.log
, newlog
, reject
);
1791 for (list
<pg_log_entry_t
>::iterator i
= newlog
.log
.begin();
1792 i
!= newlog
.log
.end(); ++i
)
1793 cerr
<< "Keeping log entry " << *i
<< std::endl
;
1794 for (list
<pg_log_entry_t
>::iterator i
= reject
.log
.begin();
1795 i
!= reject
.log
.end(); ++i
)
1796 cerr
<< "Skipping log entry " << *i
<< std::endl
;
1799 divergent_priors_t newdp
, rejectdp
;
1800 filter_divergent_priors(pgid
, curmap
, g_ceph_context
->_conf
->osd_hit_set_namespace
,
1801 ms
.divergent_priors
, newdp
, rejectdp
);
1802 ms
.divergent_priors
= newdp
;
1804 for (divergent_priors_t::iterator i
= newdp
.begin();
1805 i
!= newdp
.end(); ++i
)
1806 cerr
<< "Keeping divergent_prior " << *i
<< std::endl
;
1807 for (divergent_priors_t::iterator i
= rejectdp
.begin();
1808 i
!= rejectdp
.end(); ++i
)
1809 cerr
<< "Skipping divergent_prior " << *i
<< std::endl
;
1812 ms
.missing
.filter_objects([&](const hobject_t
&obj
) {
1813 if (obj
.nspace
== g_ceph_context
->_conf
->osd_hit_set_namespace
)
1815 assert(!obj
.is_temp());
1816 object_t oid
= obj
.oid
;
1817 object_locator_t
loc(obj
);
1818 pg_t raw_pgid
= curmap
.object_locator_to_pg(oid
, loc
);
1819 pg_t _pgid
= curmap
.raw_pg_to_pg(raw_pgid
);
1821 return pgid
.pgid
!= _pgid
;
1826 pg_missing_t missing
;
1827 Formatter
*formatter
= Formatter::create("json-pretty");
1828 dump_log(formatter
, cerr
, newlog
, ms
.missing
);
1832 // Just like a split invalidate stats since the object count is changed
1833 if (skipped_objects
)
1834 ms
.info
.stats
.stats_invalid
= true;
1842 ms
.divergent_priors
,
1844 if (ret
) return ret
;
1847 // done, clear removal flag
1849 cerr
<< "done, clearing removal flag" << std::endl
;
1853 remove
.insert("_remove");
1854 t
.omap_rmkeys(coll
, pgid
.make_pgmeta_oid(), remove
);
1855 store
->apply_transaction(&osr
, std::move(t
));
1861 int do_list(ObjectStore
*store
, string pgidstr
, string object
, boost::optional
<std::string
> nspace
,
1862 Formatter
*formatter
, bool debug
, bool human_readable
, bool head
)
1865 lookup_ghobject
lookup(object
, nspace
, head
);
1866 if (pgidstr
.length() > 0) {
1867 r
= action_on_all_objects_in_pg(store
, pgidstr
, lookup
, debug
);
1869 r
= action_on_all_objects(store
, lookup
, debug
);
1873 lookup
.dump(formatter
, human_readable
);
1874 formatter
->flush(cout
);
1878 int do_meta(ObjectStore
*store
, string object
, Formatter
*formatter
, bool debug
, bool human_readable
)
1881 boost::optional
<std::string
> nspace
; // Not specified
1882 lookup_ghobject
lookup(object
, nspace
);
1883 r
= action_on_all_objects_in_exact_pg(store
, coll_t::meta(), lookup
, debug
);
1886 lookup
.dump(formatter
, human_readable
);
1887 formatter
->flush(cout
);
1891 int remove_object(coll_t coll
, ghobject_t
&ghobj
,
1893 MapCacher::Transaction
<std::string
, bufferlist
> *_t
,
1894 ObjectStore::Transaction
*t
)
1896 int r
= mapper
.remove_oid(ghobj
.hobj
, _t
);
1897 if (r
< 0 && r
!= -ENOENT
) {
1898 cerr
<< "remove_oid returned " << cpp_strerror(r
) << std::endl
;
1902 t
->remove(coll
, ghobj
);
1906 int get_snapset(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, SnapSet
&ss
, bool silent
);
1908 int do_remove_object(ObjectStore
*store
, coll_t coll
,
1909 ghobject_t
&ghobj
, bool all
, bool force
,
1910 ObjectStore::Sequencer
&osr
)
1913 coll
.is_pg_prefix(&pg
);
1917 OSD::make_snapmapper_oid());
1918 SnapMapper
mapper(g_ceph_context
, &driver
, 0, 0, 0, pg
.shard
);
1921 int r
= store
->stat(coll
, ghobj
, &st
);
1923 cerr
<< "remove: " << cpp_strerror(r
) << std::endl
;
1928 if (ghobj
.hobj
.has_snapset()) {
1929 r
= get_snapset(store
, coll
, ghobj
, ss
, false);
1931 cerr
<< "Can't get snapset error " << cpp_strerror(r
) << std::endl
;
1934 if (!ss
.snaps
.empty() && !all
) {
1936 cout
<< "WARNING: only removing "
1937 << (ghobj
.hobj
.is_head() ? "head" : "snapdir")
1938 << " with snapshots present" << std::endl
;
1941 cerr
<< "Snapshots are present, use removeall to delete everything" << std::endl
;
1947 ObjectStore::Transaction t
;
1948 OSDriver::OSTransaction
_t(driver
.get_transaction(&t
));
1950 cout
<< "remove " << ghobj
<< std::endl
;
1953 r
= remove_object(coll
, ghobj
, mapper
, &_t
, &t
);
1958 ghobject_t snapobj
= ghobj
;
1959 for (vector
<snapid_t
>::iterator i
= ss
.snaps
.begin() ;
1960 i
!= ss
.snaps
.end() ; ++i
) {
1961 snapobj
.hobj
.snap
= *i
;
1962 cout
<< "remove " << snapobj
<< std::endl
;
1964 r
= remove_object(coll
, snapobj
, mapper
, &_t
, &t
);
1971 store
->apply_transaction(&osr
, std::move(t
));
1976 int do_list_attrs(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
)
1978 map
<string
,bufferptr
> aset
;
1979 int r
= store
->getattrs(coll
, ghobj
, aset
);
1981 cerr
<< "getattrs: " << cpp_strerror(r
) << std::endl
;
1985 for (map
<string
,bufferptr
>::iterator i
= aset
.begin();i
!= aset
.end(); ++i
) {
1986 string
key(i
->first
);
1988 key
= cleanbin(key
);
1989 cout
<< key
<< std::endl
;
1994 int do_list_omap(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
)
1996 ObjectMap::ObjectMapIterator iter
= store
->get_omap_iterator(coll
, ghobj
);
1998 cerr
<< "omap_get_iterator: " << cpp_strerror(ENOENT
) << std::endl
;
2001 iter
->seek_to_first();
2002 map
<string
, bufferlist
> oset
;
2003 while(iter
->valid()) {
2004 get_omap_batch(iter
, oset
);
2006 for (map
<string
,bufferlist
>::iterator i
= oset
.begin();i
!= oset
.end(); ++i
) {
2007 string
key(i
->first
);
2009 key
= cleanbin(key
);
2010 cout
<< key
<< std::endl
;
2016 int do_get_bytes(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, int fd
)
2021 int ret
= store
->stat(coll
, ghobj
, &st
);
2023 cerr
<< "get-bytes: " << cpp_strerror(ret
) << std::endl
;
2029 cerr
<< "size=" << total
<< std::endl
;
2031 uint64_t offset
= 0;
2032 bufferlist rawdatabl
;
2035 mysize_t len
= max_read
;
2039 ret
= store
->read(coll
, ghobj
, offset
, len
, rawdatabl
);
2046 cerr
<< "data section offset=" << offset
<< " len=" << len
<< std::endl
;
2051 ret
= write(fd
, rawdatabl
.c_str(), ret
);
2061 int do_set_bytes(ObjectStore
*store
, coll_t coll
,
2062 ghobject_t
&ghobj
, int fd
,
2063 ObjectStore::Sequencer
&osr
)
2065 ObjectStore::Transaction tran
;
2066 ObjectStore::Transaction
*t
= &tran
;
2069 cerr
<< "Write " << ghobj
<< std::endl
;
2072 t
->touch(coll
, ghobj
);
2073 t
->truncate(coll
, ghobj
, 0);
2076 uint64_t offset
= 0;
2077 bufferlist rawdatabl
;
2080 ssize_t bytes
= rawdatabl
.read_fd(fd
, max_read
);
2082 cerr
<< "read_fd error " << cpp_strerror(bytes
) << std::endl
;
2090 cerr
<< "\tdata: offset " << offset
<< " bytes " << bytes
<< std::endl
;
2092 t
->write(coll
, ghobj
, offset
, bytes
, rawdatabl
);
2095 // XXX: Should we apply_transaction() every once in a while for very large files
2099 store
->apply_transaction(&osr
, std::move(*t
));
2103 int do_get_attr(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, string key
)
2107 int r
= store
->getattr(coll
, ghobj
, key
.c_str(), bp
);
2109 cerr
<< "getattr: " << cpp_strerror(r
) << std::endl
;
2113 string
value(bp
.c_str(), bp
.length());
2115 value
= cleanbin(value
);
2116 value
.push_back('\n');
2123 int do_set_attr(ObjectStore
*store
, coll_t coll
,
2124 ghobject_t
&ghobj
, string key
, int fd
,
2125 ObjectStore::Sequencer
&osr
)
2127 ObjectStore::Transaction tran
;
2128 ObjectStore::Transaction
*t
= &tran
;
2132 cerr
<< "Setattr " << ghobj
<< std::endl
;
2134 int ret
= get_fd_data(fd
, bl
);
2141 t
->touch(coll
, ghobj
);
2143 t
->setattr(coll
, ghobj
, key
, bl
);
2145 store
->apply_transaction(&osr
, std::move(*t
));
2149 int do_rm_attr(ObjectStore
*store
, coll_t coll
,
2150 ghobject_t
&ghobj
, string key
,
2151 ObjectStore::Sequencer
&osr
)
2153 ObjectStore::Transaction tran
;
2154 ObjectStore::Transaction
*t
= &tran
;
2157 cerr
<< "Rmattr " << ghobj
<< std::endl
;
2162 t
->rmattr(coll
, ghobj
, key
);
2164 store
->apply_transaction(&osr
, std::move(*t
));
2168 int do_get_omap(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, string key
)
2171 map
<string
, bufferlist
> out
;
2175 int r
= store
->omap_get_values(coll
, ghobj
, keys
, &out
);
2177 cerr
<< "omap_get_values: " << cpp_strerror(r
) << std::endl
;
2182 cerr
<< "Key not found" << std::endl
;
2186 assert(out
.size() == 1);
2188 bufferlist bl
= out
.begin()->second
;
2189 string
value(bl
.c_str(), bl
.length());
2191 value
= cleanbin(value
);
2192 value
.push_back('\n');
2199 int do_set_omap(ObjectStore
*store
, coll_t coll
,
2200 ghobject_t
&ghobj
, string key
, int fd
,
2201 ObjectStore::Sequencer
&osr
)
2203 ObjectStore::Transaction tran
;
2204 ObjectStore::Transaction
*t
= &tran
;
2205 map
<string
, bufferlist
> attrset
;
2209 cerr
<< "Set_omap " << ghobj
<< std::endl
;
2211 int ret
= get_fd_data(fd
, valbl
);
2215 attrset
.insert(pair
<string
, bufferlist
>(key
, valbl
));
2220 t
->touch(coll
, ghobj
);
2222 t
->omap_setkeys(coll
, ghobj
, attrset
);
2224 store
->apply_transaction(&osr
, std::move(*t
));
2228 int do_rm_omap(ObjectStore
*store
, coll_t coll
,
2229 ghobject_t
&ghobj
, string key
,
2230 ObjectStore::Sequencer
&osr
)
2232 ObjectStore::Transaction tran
;
2233 ObjectStore::Transaction
*t
= &tran
;
2239 cerr
<< "Rm_omap " << ghobj
<< std::endl
;
2244 t
->omap_rmkeys(coll
, ghobj
, keys
);
2246 store
->apply_transaction(&osr
, std::move(*t
));
2250 int do_get_omaphdr(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
)
2254 int r
= store
->omap_get_header(coll
, ghobj
, &hdrbl
, true);
2256 cerr
<< "omap_get_header: " << cpp_strerror(r
) << std::endl
;
2260 string
header(hdrbl
.c_str(), hdrbl
.length());
2262 header
= cleanbin(header
);
2263 header
.push_back('\n');
2270 int do_set_omaphdr(ObjectStore
*store
, coll_t coll
,
2271 ghobject_t
&ghobj
, int fd
,
2272 ObjectStore::Sequencer
&osr
)
2274 ObjectStore::Transaction tran
;
2275 ObjectStore::Transaction
*t
= &tran
;
2279 cerr
<< "Omap_setheader " << ghobj
<< std::endl
;
2281 int ret
= get_fd_data(fd
, hdrbl
);
2288 t
->touch(coll
, ghobj
);
2290 t
->omap_setheader(coll
, ghobj
, hdrbl
);
2292 store
->apply_transaction(&osr
, std::move(*t
));
2296 struct do_fix_lost
: public action_on_object_t
{
2297 ObjectStore::Sequencer
*osr
;
2299 explicit do_fix_lost(ObjectStore::Sequencer
*_osr
) : osr(_osr
) {}
2301 int call(ObjectStore
*store
, coll_t coll
,
2302 ghobject_t
&ghobj
, object_info_t
&oi
) override
{
2304 cout
<< coll
<< "/" << ghobj
<< " is lost";
2310 oi
.clear_flag(object_info_t::FLAG_LOST
);
2312 ::encode(oi
, bl
, -1); /* fixme: using full features */
2313 ObjectStore::Transaction t
;
2314 t
.setattr(coll
, ghobj
, OI_ATTR
, bl
);
2315 int r
= store
->apply_transaction(osr
, std::move(t
));
2317 cerr
<< "Error getting fixing attr on : " << make_pair(coll
, ghobj
)
2319 << cpp_strerror(r
) << std::endl
;
2327 int get_snapset(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, SnapSet
&ss
, bool silent
= false)
2330 int r
= store
->getattr(coll
, ghobj
, SS_ATTR
, attr
);
2333 cerr
<< "Error getting snapset on : " << make_pair(coll
, ghobj
) << ", "
2334 << cpp_strerror(r
) << std::endl
;
2337 bufferlist::iterator bp
= attr
.begin();
2342 cerr
<< "Error decoding snapset on : " << make_pair(coll
, ghobj
) << ", "
2343 << cpp_strerror(r
) << std::endl
;
2349 int print_obj_info(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, Formatter
* formatter
)
2352 formatter
->open_object_section("obj");
2353 formatter
->open_object_section("id");
2354 ghobj
.dump(formatter
);
2355 formatter
->close_section();
2358 int gr
= store
->getattr(coll
, ghobj
, OI_ATTR
, attr
);
2361 cerr
<< "Error getting attr on : " << make_pair(coll
, ghobj
) << ", "
2362 << cpp_strerror(r
) << std::endl
;
2365 bufferlist::iterator bp
= attr
.begin();
2368 formatter
->open_object_section("info");
2370 formatter
->close_section();
2373 cerr
<< "Error decoding attr on : " << make_pair(coll
, ghobj
) << ", "
2374 << cpp_strerror(r
) << std::endl
;
2378 int sr
= store
->stat(coll
, ghobj
, &st
, true);
2381 cerr
<< "Error stat on : " << make_pair(coll
, ghobj
) << ", "
2382 << cpp_strerror(r
) << std::endl
;
2384 formatter
->open_object_section("stat");
2385 formatter
->dump_int("size", st
.st_size
);
2386 formatter
->dump_int("blksize", st
.st_blksize
);
2387 formatter
->dump_int("blocks", st
.st_blocks
);
2388 formatter
->dump_int("nlink", st
.st_nlink
);
2389 formatter
->close_section();
2392 if (ghobj
.hobj
.has_snapset()) {
2394 int snr
= get_snapset(store
, coll
, ghobj
, ss
);
2398 formatter
->open_object_section("SnapSet");
2400 formatter
->close_section();
2404 gr
= store
->getattr(coll
, ghobj
, ECUtil::get_hinfo_key(), hattr
);
2406 ECUtil::HashInfo hinfo
;
2407 auto hp
= hattr
.begin();
2410 formatter
->open_object_section("hinfo");
2411 hinfo
.dump(formatter
);
2412 formatter
->close_section();
2415 cerr
<< "Error decoding hinfo on : " << make_pair(coll
, ghobj
) << ", "
2416 << cpp_strerror(r
) << std::endl
;
2419 formatter
->close_section();
2420 formatter
->flush(cout
);
2425 int corrupt_info(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, Formatter
* formatter
,
2426 ObjectStore::Sequencer
&osr
)
2429 int r
= store
->getattr(coll
, ghobj
, OI_ATTR
, attr
);
2431 cerr
<< "Error getting attr on : " << make_pair(coll
, ghobj
) << ", "
2432 << cpp_strerror(r
) << std::endl
;
2436 bufferlist::iterator bp
= attr
.begin();
2441 cerr
<< "Error getting attr on : " << make_pair(coll
, ghobj
) << ", "
2442 << cpp_strerror(r
) << std::endl
;
2445 cout
<< "Corrupting info" << std::endl
;
2448 oi
.alloc_hint_flags
+= 0xff;
2449 ObjectStore::Transaction t
;
2450 ::encode(oi
, attr
, -1); /* fixme: using full features */
2451 t
.setattr(coll
, ghobj
, OI_ATTR
, attr
);
2452 r
= store
->apply_transaction(&osr
, std::move(t
));
2454 cerr
<< "Error writing object info: " << make_pair(coll
, ghobj
) << ", "
2455 << cpp_strerror(r
) << std::endl
;
2462 int set_size(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, uint64_t setsize
, Formatter
* formatter
,
2463 ObjectStore::Sequencer
&osr
, bool corrupt
)
2465 if (ghobj
.hobj
.is_snapdir()) {
2466 cerr
<< "Can't set the size of a snapdir" << std::endl
;
2470 int r
= store
->getattr(coll
, ghobj
, OI_ATTR
, attr
);
2472 cerr
<< "Error getting attr on : " << make_pair(coll
, ghobj
) << ", "
2473 << cpp_strerror(r
) << std::endl
;
2477 bufferlist::iterator bp
= attr
.begin();
2482 cerr
<< "Error getting attr on : " << make_pair(coll
, ghobj
) << ", "
2483 << cpp_strerror(r
) << std::endl
;
2487 r
= store
->stat(coll
, ghobj
, &st
, true);
2489 cerr
<< "Error stat on : " << make_pair(coll
, ghobj
) << ", "
2490 << cpp_strerror(r
) << std::endl
;
2492 ghobject_t
head(ghobj
);
2494 bool found_head
= true;
2495 map
<snapid_t
, uint64_t>::iterator csi
;
2496 bool is_snap
= ghobj
.hobj
.is_snap();
2498 head
.hobj
= head
.hobj
.get_head();
2499 r
= get_snapset(store
, coll
, head
, ss
, true);
2500 if (r
< 0 && r
!= -ENOENT
) {
2501 // Requested get_snapset() silent, so if not -ENOENT show error
2502 cerr
<< "Error getting snapset on : " << make_pair(coll
, head
) << ", "
2503 << cpp_strerror(r
) << std::endl
;
2507 head
.hobj
= head
.hobj
.get_snapdir();
2508 r
= get_snapset(store
, coll
, head
, ss
);
2515 csi
= ss
.clone_size
.find(ghobj
.hobj
.snap
);
2516 if (csi
== ss
.clone_size
.end()) {
2517 cerr
<< "SnapSet is missing clone_size for snap " << ghobj
.hobj
.snap
<< std::endl
;
2521 if ((uint64_t)st
.st_size
== setsize
&& oi
.size
== setsize
2522 && (!is_snap
|| csi
->second
== setsize
)) {
2523 cout
<< "Size of object is already " << setsize
<< std::endl
;
2526 cout
<< "Setting size to " << setsize
<< ", stat size " << st
.st_size
2527 << ", obj info size " << oi
.size
;
2529 cout
<< ", " << (found_head
? "head" : "snapdir")
2530 << " clone_size " << csi
->second
;
2531 csi
->second
= setsize
;
2537 ObjectStore::Transaction t
;
2538 // Only modify object info if we want to corrupt it
2539 if (!corrupt
&& (uint64_t)st
.st_size
!= setsize
) {
2540 t
.truncate(coll
, ghobj
, setsize
);
2541 // Changing objectstore size will invalidate data_digest, so clear it.
2542 oi
.clear_data_digest();
2544 ::encode(oi
, attr
, -1); /* fixme: using full features */
2545 t
.setattr(coll
, ghobj
, OI_ATTR
, attr
);
2547 bufferlist snapattr
;
2549 ::encode(ss
, snapattr
);
2550 t
.setattr(coll
, head
, SS_ATTR
, snapattr
);
2552 r
= store
->apply_transaction(&osr
, std::move(t
));
2554 cerr
<< "Error writing object info: " << make_pair(coll
, ghobj
) << ", "
2555 << cpp_strerror(r
) << std::endl
;
2562 int clear_snapset(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
,
2563 string arg
, ObjectStore::Sequencer
&osr
)
2566 int ret
= get_snapset(store
, coll
, ghobj
, ss
);
2570 // Use "head" to set head_exists incorrectly
2571 if (arg
== "corrupt" || arg
== "head")
2572 ss
.head_exists
= !ghobj
.hobj
.is_head();
2573 else if (ss
.head_exists
!= ghobj
.hobj
.is_head()) {
2574 cerr
<< "Correcting head_exists, set to "
2575 << (ghobj
.hobj
.is_head() ? "true" : "false") << std::endl
;
2576 ss
.head_exists
= ghobj
.hobj
.is_head();
2578 // Use "corrupt" to clear entire SnapSet
2579 // Use "seq" to just corrupt SnapSet.seq
2580 if (arg
== "corrupt" || arg
== "seq")
2582 // Use "snaps" to just clear SnapSet.snaps
2583 if (arg
== "corrupt" || arg
== "snaps")
2585 // By default just clear clone, clone_overlap and clone_size
2586 if (arg
== "corrupt")
2588 if (arg
== "" || arg
== "clones")
2590 if (arg
== "" || arg
== "clone_overlap")
2591 ss
.clone_overlap
.clear();
2592 if (arg
== "" || arg
== "clone_size")
2593 ss
.clone_size
.clear();
2594 // Break all clone sizes by adding 1
2595 if (arg
== "size") {
2596 for (map
<snapid_t
, uint64_t>::iterator i
= ss
.clone_size
.begin();
2597 i
!= ss
.clone_size
.end(); ++i
)
2604 ObjectStore::Transaction t
;
2605 t
.setattr(coll
, ghobj
, SS_ATTR
, bl
);
2606 int r
= store
->apply_transaction(&osr
, std::move(t
));
2608 cerr
<< "Error setting snapset on : " << make_pair(coll
, ghobj
) << ", "
2609 << cpp_strerror(r
) << std::endl
;
2616 vector
<snapid_t
>::iterator
find(vector
<snapid_t
> &v
, snapid_t clid
)
2618 return std::find(v
.begin(), v
.end(), clid
);
2621 map
<snapid_t
, interval_set
<uint64_t> >::iterator
2622 find(map
<snapid_t
, interval_set
<uint64_t> > &m
, snapid_t clid
)
2624 return m
.find(clid
);
2627 map
<snapid_t
, uint64_t>::iterator
find(map
<snapid_t
, uint64_t> &m
,
2630 return m
.find(clid
);
2634 int remove_from(T
&mv
, string name
, snapid_t cloneid
, bool force
)
2636 typename
T::iterator i
= find(mv
, cloneid
);
2637 if (i
!= mv
.end()) {
2640 cerr
<< "Clone " << cloneid
<< " doesn't exist in " << name
;
2642 cerr
<< " (ignored)" << std::endl
;
2651 int remove_clone(ObjectStore
*store
, coll_t coll
, ghobject_t
&ghobj
, snapid_t cloneid
, bool force
,
2652 ObjectStore::Sequencer
&osr
)
2654 // XXX: Don't allow this if in a cache tier or former cache tier
2655 // bool allow_incomplete_clones() const {
2656 // return cache_mode != CACHEMODE_NONE || has_flag(FLAG_INCOMPLETE_CLONES);
2659 int ret
= get_snapset(store
, coll
, ghobj
, snapset
);
2663 // Derived from trim_object()
2665 vector
<snapid_t
>::iterator p
;
2666 for (p
= snapset
.clones
.begin(); p
!= snapset
.clones
.end(); ++p
)
2669 if (p
== snapset
.clones
.end()) {
2670 cerr
<< "Clone " << cloneid
<< " not present";
2673 if (p
!= snapset
.clones
.begin()) {
2674 // not the oldest... merge overlap into next older clone
2675 vector
<snapid_t
>::iterator n
= p
- 1;
2676 hobject_t prev_coid
= ghobj
.hobj
;
2677 prev_coid
.snap
= *n
;
2678 //bool adjust_prev_bytes = is_present_clone(prev_coid);
2680 //if (adjust_prev_bytes)
2681 // ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(*n);
2683 snapset
.clone_overlap
[*n
].intersection_of(
2684 snapset
.clone_overlap
[*p
]);
2686 //if (adjust_prev_bytes)
2687 // ctx->delta_stats.num_bytes += snapset.get_clone_bytes(*n);
2690 ret
= remove_from(snapset
.clones
, "clones", cloneid
, force
);
2691 if (ret
) return ret
;
2692 ret
= remove_from(snapset
.clone_overlap
, "clone_overlap", cloneid
, force
);
2693 if (ret
) return ret
;
2694 ret
= remove_from(snapset
.clone_size
, "clone_size", cloneid
, force
);
2695 if (ret
) return ret
;
2701 ::encode(snapset
, bl
);
2702 ObjectStore::Transaction t
;
2703 t
.setattr(coll
, ghobj
, SS_ATTR
, bl
);
2704 int r
= store
->apply_transaction(&osr
, std::move(t
));
2706 cerr
<< "Error setting snapset on : " << make_pair(coll
, ghobj
) << ", "
2707 << cpp_strerror(r
) << std::endl
;
2710 cout
<< "Removal of clone " << cloneid
<< " complete" << std::endl
;
2711 cout
<< "Use pg repair after OSD restarted to correct stat information" << std::endl
;
// dup(): clone the entire contents of one ObjectStore into another --
// every collection, every object (byte data, xattrs, omap header and
// omap keys), the on-disk keyring file, and the basic OSD identity
// metadata keys -- then mark the destination "ready".
// NOTE(review): this chunk is a garbled extraction; the original's
// interior lines (the `if (r < 0)` error guards, braces, and the
// mount/umount calls) were dropped, so only comments are added here
// and the code itself is left byte-for-byte as found.
2715 int dup(string srcpath
, ObjectStore
*src
, string dstpath
, ObjectStore
*dst
)
2717 cout
<< "dup from " << src
->get_type() << ": " << srcpath
<< "\n"
2718 << " to " << dst
->get_type() << ": " << dstpath
// One sequencer is shared by every destination apply_transaction() below.
2720 ObjectStore::Sequencer
osr("dup");
2722 vector
<coll_t
> collections
;
2727 cerr
<< "failed to mount src: " << cpp_strerror(r
) << std::endl
;
2732 cerr
<< "failed to mount dst: " << cpp_strerror(r
) << std::endl
;
// Refuse to dup between stores that carry different fsids.
2736 if (src
->get_fsid() != dst
->get_fsid()) {
2737 cerr
<< "src fsid " << src
->get_fsid() << " != dest " << dst
->get_fsid()
2741 cout
<< "fsid " << src
->get_fsid() << std::endl
;
2743 // make sure dst is empty
2744 r
= dst
->list_collections(collections
);
2746 cerr
<< "error listing collections on dst: " << cpp_strerror(r
) << std::endl
;
2749 if (!collections
.empty()) {
2750 cerr
<< "destination store is not empty" << std::endl
;
2754 r
= src
->list_collections(collections
);
2756 cerr
<< "error listing collections on src: " << cpp_strerror(r
) << std::endl
;
2760 num
= collections
.size();
2761 cout
<< num
<< " collections" << std::endl
;
// Copy each collection: recreate it on dst with the same split bits,
// then stream its objects across.
2763 for (auto cid
: collections
) {
2764 cout
<< i
++ << "/" << num
<< " " << cid
<< std::endl
;
2766 ObjectStore::Transaction t
;
2767 int bits
= src
->collection_bits(cid
);
// The filestore meta collection legitimately has no bit count;
// presumably a dropped line defaulted `bits` here -- confirm
// against the full source.
2769 if (src
->get_type() == "filestore" && cid
.is_meta()) {
2772 cerr
<< "cannot get bit count for collection " << cid
<< ": "
2773 << cpp_strerror(bits
) << std::endl
;
2777 t
.create_collection(cid
, bits
);
2778 dst
->apply_transaction(&osr
, std::move(t
));
2783 uint64_t bytes
= 0, keys
= 0;
// Page through the collection's objects, 1000 at a time, resuming
// from `pos` on each iteration.
2785 vector
<ghobject_t
> ls
;
2786 r
= src
->collection_list(cid
, pos
, ghobject_t::get_max(), 1000, &ls
, &pos
);
2788 cerr
<< "collection_list on " << cid
<< " from " << pos
<< " got: "
2789 << cpp_strerror(r
) << std::endl
;
// Per object: copy xattrs, full byte payload, omap header and omap
// keys in a single destination transaction.
2796 for (auto& oid
: ls
) {
2797 //cout << " " << cid << " " << oid << std::endl;
// Progress line, rewritten in place via "\r".
2799 cout
<< " " << std::setw(16) << n
<< " objects, "
2800 << std::setw(16) << bytes
<< " bytes, "
2801 << std::setw(16) << keys
<< " keys"
2802 << std::setw(1) << "\r" << std::flush
;
2806 ObjectStore::Transaction t
;
2809 map
<string
,bufferptr
> attrs
;
2810 src
->getattrs(cid
, oid
, attrs
);
2811 if (!attrs
.empty()) {
2812 t
.setattrs(cid
, oid
, attrs
);
// read(…, 0, 0, …) reads the whole object into bl.
2816 src
->read(cid
, oid
, 0, 0, bl
);
2818 t
.write(cid
, oid
, 0, bl
.length(), bl
);
2819 bytes
+= bl
.length();
2823 map
<string
,bufferlist
> omap
;
2824 src
->omap_get(cid
, oid
, &header
, &omap
);
2825 if (header
.length()) {
2826 t
.omap_setheader(cid
, oid
, header
);
2829 if (!omap
.empty()) {
2830 keys
+= omap
.size();
2831 t
.omap_setkeys(cid
, oid
, omap
);
2834 dst
->apply_transaction(&osr
, std::move(t
));
// Final totals line (newline instead of the in-place "\r" above).
2837 cout
<< " " << std::setw(16) << n
<< " objects, "
2838 << std::setw(16) << bytes
<< " bytes, "
2839 << std::setw(16) << keys
<< " keys"
2840 << std::setw(1) << std::endl
;
// Copy the keyring file verbatim; destination copy is created 0600.
2844 cout
<< "keyring" << std::endl
;
2847 string s
= srcpath
+ "/keyring";
2849 r
= bl
.read_file(s
.c_str(), &err
);
2851 cerr
<< "failed to copy " << s
<< ": " << err
<< std::endl
;
2853 string d
= dstpath
+ "/keyring";
2854 bl
.write_file(d
.c_str(), 0600);
// Replicate the OSD identity metadata keys, then mark dst ready.
2859 cout
<< "duping osd metadata" << std::endl
;
2861 for (auto k
: {"magic", "whoami", "ceph_fsid", "fsid"}) {
2863 src
->read_meta(k
, &val
);
2864 dst
->write_meta(k
, val
);
2868 dst
->write_meta("ready", "ready");
2870 cout
<< "done." << std::endl
;
// usage(): print the boost::program_options help text followed by the
// positional object-command syntax, all to stderr.
// NOTE(review): garbled extraction -- braces and the trailing exit of the
// original were dropped; only comments are added here.
2879 void usage(po::options_description
&desc
)
2882 cerr
<< desc
<< std::endl
;
// Positional (object-command) forms not expressible via desc above.
2884 cerr
<< "Positional syntax:" << std::endl
;
2886 cerr
<< "ceph-objectstore-tool ... <object> (get|set)-bytes [file]" << std::endl
;
2887 cerr
<< "ceph-objectstore-tool ... <object> set-(attr|omap) <key> [file]" << std::endl
;
2888 cerr
<< "ceph-objectstore-tool ... <object> (get|rm)-(attr|omap) <key>" << std::endl
;
2889 cerr
<< "ceph-objectstore-tool ... <object> get-omaphdr" << std::endl
;
2890 cerr
<< "ceph-objectstore-tool ... <object> set-omaphdr [file]" << std::endl
;
2891 cerr
<< "ceph-objectstore-tool ... <object> list-attrs" << std::endl
;
2892 cerr
<< "ceph-objectstore-tool ... <object> list-omap" << std::endl
;
2893 cerr
<< "ceph-objectstore-tool ... <object> remove|removeall" << std::endl
;
2894 cerr
<< "ceph-objectstore-tool ... <object> dump" << std::endl
;
2895 cerr
<< "ceph-objectstore-tool ... <object> set-size" << std::endl
;
2896 cerr
<< "ceph-objectstore-tool ... <object> remove-clone-metadata <cloneid>" << std::endl
;
// How the <object> positional argument may be specified.
2898 cerr
<< "<object> can be a JSON object description as displayed" << std::endl
;
2899 cerr
<< "by --op list." << std::endl
;
2900 cerr
<< "<object> can be an object name which will be looked up in all" << std::endl
;
2901 cerr
<< "the OSD's PGs." << std::endl
;
2902 cerr
<< "<object> can be the empty string ('') which with a provided pgid " << std::endl
;
2903 cerr
<< "specifies the pgmeta object" << std::endl
;
2905 cerr
<< "The optional [file] argument will read stdin or write stdout" << std::endl
;
2906 cerr
<< "if not specified or if '-' specified." << std::endl
;
/// Return true if @p check ends with the suffix @p ending.
///
/// An empty @p ending is a suffix of every string (including the empty
/// string), matching the original rfind()-based behavior.
///
/// Implementation note: the previous version used
///   check.rfind(ending) == check.size() - ending.size()
/// which, when the suffix does NOT match, scans backward through the
/// whole string -- O(n*m).  Comparing only the final ending.size()
/// characters is O(m) and states the intent directly.
bool ends_with(const string &check, const string &ending)
{
  return check.size() >= ending.size() &&
    check.compare(check.size() - ending.size(), ending.size(), ending) == 0;
}
// mydump_journal(): dump a filestore journal to the given Formatter
// without mounting the store, by constructing a standalone FileJournal
// and calling its _fdump().
// NOTE(review): garbled extraction -- the early-return for an empty
// journalpath, the declaration of `r`, the cleanup of `journal`, and
// the final return were on dropped lines; only comments are added here.
2914 // Based on FileStore::dump_journal(), set-up enough to only dump
2915 int mydump_journal(Formatter
*f
, string journalpath
, bool m_journal_dio
)
// Guard: an empty journal path cannot be dumped (the branch body was
// on a dropped line -- presumably an error return; confirm against
// the full source).
2919 if (!journalpath
.length())
// Minimal FileJournal: no FileStore back-pointer or throttle needed
// just to read and dump entries.
2922 FileJournal
*journal
= new FileJournal(g_ceph_context
, uuid_d(), NULL
, NULL
,
2923 journalpath
.c_str(), m_journal_dio
);
// _fdump(f, false): dump without attempting repair/simple mode.
2924 r
= journal
->_fdump(*f
, false);
// apply_layout_settings(): re-apply the configured directory layout
// (split/merge) settings to every filestore collection belonging to
// either the named pool or the given pgid.  With dry_run it only
// reports which collections would be touched.
// NOTE(review): garbled extraction -- the `if (r < 0)` guards, some
// braces and the final return were on dropped lines; only comments
// are added here.
2929 int apply_layout_settings(ObjectStore
*os
, const OSDSuperblock
&superblock
,
2930 const string
&pool_name
, const spg_t
&pgid
, bool dry_run
,
// Only filestore has on-disk directory layout settings; any other
// backend is a successful no-op (see the comment at 2938).
2935 FileStore
*fs
= dynamic_cast<FileStore
*>(os
);
2937 cerr
<< "Nothing to do for non-filestore backend" << std::endl
;
2938 return 0; // making this return success makes testing easier
// Load the OSD's current OSDMap so pool names can be resolved.
2943 r
= get_osdmap(os
, superblock
.current_epoch
, curmap
, bl
);
2945 cerr
<< "Can't find local OSDMap: " << cpp_strerror(r
) << std::endl
;
// Resolve --pool to a pool id; -1 means "filter by pgid instead".
2949 int64_t poolid
= -1;
2950 if (pool_name
.length()) {
2951 poolid
= curmap
.lookup_pg_pool_name(pool_name
);
2953 cerr
<< "Couldn't find pool " << pool_name
<< ": " << cpp_strerror(poolid
)
// Keep only the PG collections that match the pool id or the pgid.
2959 vector
<coll_t
> collections
, filtered_colls
;
2960 r
= os
->list_collections(collections
);
2962 cerr
<< "Error listing collections: " << cpp_strerror(r
) << std::endl
;
2966 for (auto const &coll
: collections
) {
2968 if (coll
.is_pg(&coll_pgid
) &&
2969 ((poolid
>= 0 && coll_pgid
.pool() == (uint64_t)poolid
) ||
2970 coll_pgid
== pgid
)) {
2971 filtered_colls
.push_back(coll
);
// Apply (or, when dry_run, just report) per matching collection,
// with an in-place "\r" progress line.
2975 size_t done
= 0, total
= filtered_colls
.size();
2976 for (auto const &coll
: filtered_colls
) {
2978 cerr
<< "Would apply layout settings to " << coll
<< std::endl
;
2980 cerr
<< "Finished " << done
<< "/" << total
<< " collections" << "\r";
2981 r
= fs
->apply_layout_settings(coll
, target_level
);
2983 cerr
<< "Error applying layout settings to " << coll
<< std::endl
;
2990 cerr
<< "Finished " << total
<< "/" << total
<< " collections" << "\r" << std::endl
;
2994 int main(int argc
, char **argv
)
2996 string dpath
, jpath
, pgidstr
, op
, file
, mountpoint
, mon_store_path
, object
;
2997 string target_data_path
, fsid
;
2998 string objcmd
, arg1
, arg2
, type
, format
, argnspace
, pool
;
2999 boost::optional
<std::string
> nspace
;
3003 bool human_readable
;
3005 Formatter
*formatter
;
3008 po::options_description
desc("Allowed options");
3010 ("help", "produce help message")
3011 ("type", po::value
<string
>(&type
),
3012 "Arg is one of [bluestore, filestore (default), memstore]")
3013 ("data-path", po::value
<string
>(&dpath
),
3014 "path to object store, mandatory")
3015 ("journal-path", po::value
<string
>(&jpath
),
3016 "path to journal, use if tool can't find it")
3017 ("pgid", po::value
<string
>(&pgidstr
),
3018 "PG id, mandatory for info, log, remove, export, export-remove, rm-past-intervals, mark-complete, trim-pg-log, and mandatory for apply-layout-settings if --pool is not specified")
3019 ("pool", po::value
<string
>(&pool
),
3020 "Pool name, mandatory for apply-layout-settings if --pgid is not specified")
3021 ("op", po::value
<string
>(&op
),
3022 "Arg is one of [info, log, remove, mkfs, fsck, repair, fuse, dup, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, "
3023 "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, apply-layout-settings, update-mon-db, dump-import, trim-pg-log]")
3024 ("epoch", po::value
<unsigned>(&epoch
),
3025 "epoch# for get-osdmap and get-inc-osdmap, the current epoch in use if not specified")
3026 ("file", po::value
<string
>(&file
),
3027 "path of file to export, export-remove, import, get-osdmap, set-osdmap, get-inc-osdmap or set-inc-osdmap")
3028 ("mon-store-path", po::value
<string
>(&mon_store_path
),
3029 "path of monstore to update-mon-db")
3030 ("fsid", po::value
<string
>(&fsid
),
3031 "fsid for new store created by mkfs")
3032 ("target-data-path", po::value
<string
>(&target_data_path
),
3033 "path of target object store (for --op dup)")
3034 ("mountpoint", po::value
<string
>(&mountpoint
),
3036 ("format", po::value
<string
>(&format
)->default_value("json-pretty"),
3037 "Output format which may be json, json-pretty, xml, xml-pretty")
3038 ("debug", "Enable diagnostic output to stderr")
3039 ("force", "Ignore some types of errors and proceed with operation - USE WITH CAUTION: CORRUPTION POSSIBLE NOW OR IN THE FUTURE")
3040 ("skip-journal-replay", "Disable journal replay")
3041 ("skip-mount-omap", "Disable mounting of omap")
3042 ("head", "Find head/snapdir when searching for objects by name")
3043 ("dry-run", "Don't modify the objectstore")
3044 ("namespace", po::value
<string
>(&argnspace
), "Specify namespace when searching for objects")
3047 po::options_description
positional("Positional options");
3048 positional
.add_options()
3049 ("object", po::value
<string
>(&object
), "'' for pgmeta_oid, object name or ghobject in json")
3050 ("objcmd", po::value
<string
>(&objcmd
), "command [(get|set)-bytes, (get|set|rm)-(attr|omap), (get|set)-omaphdr, list-attrs, list-omap, remove]")
3051 ("arg1", po::value
<string
>(&arg1
), "arg1 based on cmd")
3052 ("arg2", po::value
<string
>(&arg2
), "arg2 based on cmd")
3055 po::options_description all
;
3056 all
.add(desc
).add(positional
);
3058 po::positional_options_description pd
;
3059 pd
.add("object", 1).add("objcmd", 1).add("arg1", 1).add("arg2", 1);
3061 vector
<string
> ceph_option_strings
;
3062 po::variables_map vm
;
3064 po::parsed_options parsed
=
3065 po::command_line_parser(argc
, argv
).options(all
).allow_unregistered().positional(pd
).run();
3066 po::store( parsed
, vm
);
3068 ceph_option_strings
= po::collect_unrecognized(parsed
.options
,
3069 po::include_positional
);
3070 } catch(po::error
&e
) {
3071 std::cerr
<< e
.what() << std::endl
;
3075 if (vm
.count("help")) {
3080 debug
= (vm
.count("debug") > 0);
3082 force
= (vm
.count("force") > 0);
3084 if (vm
.count("namespace"))
3087 dry_run
= (vm
.count("dry-run") > 0);
3089 osflagbits_t flags
= 0;
3090 if (dry_run
|| vm
.count("skip-journal-replay"))
3091 flags
|= SKIP_JOURNAL_REPLAY
;
3092 if (vm
.count("skip-mount-omap"))
3093 flags
|= SKIP_MOUNT_OMAP
;
3094 if (op
== "update-mon-db")
3095 flags
|= SKIP_JOURNAL_REPLAY
;
3097 head
= (vm
.count("head") > 0);
3099 vector
<const char *> ceph_options
;
3100 env_to_vec(ceph_options
);
3101 ceph_options
.reserve(ceph_options
.size() + ceph_option_strings
.size());
3102 for (vector
<string
>::iterator i
= ceph_option_strings
.begin();
3103 i
!= ceph_option_strings
.end();
3105 ceph_options
.push_back(i
->c_str());
3109 snprintf(fn
, sizeof(fn
), "%s/type", dpath
.c_str());
3110 int fd
= ::open(fn
, O_RDONLY
);
3115 string dp_type
= string(bl
.c_str(), bl
.length() - 1); // drop \n
3116 if (vm
.count("type") && dp_type
!= "" && type
!= dp_type
)
3117 cerr
<< "WARNING: Ignoring type \"" << type
<< "\" - found data-path type \""
3118 << dp_type
<< "\"" << std::endl
;
3120 //cout << "object store type is " << type << std::endl;
3124 if (!vm
.count("type") && type
== "") {
3127 if (!vm
.count("data-path") &&
3128 op
!= "dump-import" &&
3129 !(op
== "dump-journal" && type
== "filestore")) {
3130 cerr
<< "Must provide --data-path" << std::endl
;
3134 if (type
== "filestore" && !vm
.count("journal-path")) {
3135 jpath
= dpath
+ "/journal";
3137 if (!vm
.count("op") && !vm
.count("object")) {
3138 cerr
<< "Must provide --op or object command..." << std::endl
;
3142 if (op
!= "list" && op
!= "apply-layout-settings" &&
3143 vm
.count("op") && vm
.count("object")) {
3144 cerr
<< "Can't specify both --op and object command syntax" << std::endl
;
3148 if (op
== "apply-layout-settings" && !(vm
.count("pool") ^ vm
.count("pgid"))) {
3149 cerr
<< "apply-layout-settings requires either --pool or --pgid"
3154 if (op
!= "list" && op
!= "apply-layout-settings" && vm
.count("object") && !vm
.count("objcmd")) {
3155 cerr
<< "Invalid syntax, missing command" << std::endl
;
3159 if (op
== "fuse" && mountpoint
.length() == 0) {
3160 cerr
<< "Missing fuse mountpoint" << std::endl
;
3164 outistty
= isatty(STDOUT_FILENO
);
3167 if ((op
== "export" || op
== "export-remove" || op
== "get-osdmap" || op
== "get-inc-osdmap") && !dry_run
) {
3168 if (!vm
.count("file") || file
== "-") {
3170 cerr
<< "stdout is a tty and no --file filename specified" << std::endl
;
3173 file_fd
= STDOUT_FILENO
;
3175 file_fd
= open(file
.c_str(), O_WRONLY
|O_CREAT
|O_TRUNC
, 0666);
3177 } else if (op
== "import" || op
== "dump-import" || op
== "set-osdmap" || op
== "set-inc-osdmap") {
3178 if (!vm
.count("file") || file
== "-") {
3179 if (isatty(STDIN_FILENO
)) {
3180 cerr
<< "stdin is a tty and no --file filename specified" << std::endl
;
3183 file_fd
= STDIN_FILENO
;
3185 file_fd
= open(file
.c_str(), O_RDONLY
);
3189 ObjectStoreTool tool
= ObjectStoreTool(file_fd
, dry_run
);
3191 if (vm
.count("file") && file_fd
== fd_none
&& !dry_run
) {
3192 cerr
<< "--file option only applies to import, dump-import, export, export-remove, "
3193 << "get-osdmap, set-osdmap, get-inc-osdmap or set-inc-osdmap" << std::endl
;
3197 if (file_fd
!= fd_none
&& file_fd
< 0) {
3198 string err
= string("file: ") + file
;
3199 perror(err
.c_str());
3203 auto cct
= global_init(
3204 NULL
, ceph_options
, CEPH_ENTITY_TYPE_OSD
,
3205 CODE_ENVIRONMENT_UTILITY_NODOUT
, 0);
3206 //CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
3207 common_init_finish(g_ceph_context
);
3208 g_conf
= g_ceph_context
->_conf
;
3210 g_conf
->set_val_or_die("log_to_stderr", "true");
3211 g_conf
->set_val_or_die("err_to_stderr", "true");
3213 g_conf
->apply_changes(NULL
);
3215 // Special list handling. Treating pretty_format as human readable,
3216 // with one object per line and not an enclosing array.
3217 human_readable
= ends_with(format
, "-pretty");
3218 if ((op
== "list" || op
== "meta-list") && human_readable
) {
3219 // Remove -pretty from end of format which we know is there
3220 format
= format
.substr(0, format
.size() - strlen("-pretty"));
3223 formatter
= Formatter::create(format
);
3224 if (formatter
== NULL
) {
3225 cerr
<< "unrecognized format: " << format
<< std::endl
;
3229 // Special handling for filestore journal, so we can dump it without mounting
3230 if (op
== "dump-journal" && type
== "filestore") {
3231 int ret
= mydump_journal(formatter
, jpath
, g_conf
->journal_dio
);
3233 cerr
<< "journal-path: " << jpath
<< ": "
3234 << cpp_strerror(ret
) << std::endl
;
3237 formatter
->flush(cout
);
3241 if (op
== "dump-import") {
3242 int ret
= tool
.dump_import(formatter
);
3244 cerr
<< "dump-import: "
3245 << cpp_strerror(ret
) << std::endl
;
3251 //Verify that data-path really exists
3253 if (::stat(dpath
.c_str(), &st
) == -1) {
3254 string err
= string("data-path: ") + dpath
;
3255 perror(err
.c_str());
3259 if (pgidstr
.length() && !pgid
.parse(pgidstr
.c_str())) {
3260 cerr
<< "Invalid pgid '" << pgidstr
<< "' specified" << std::endl
;
3264 //Verify that the journal-path really exists
3265 if (type
== "filestore") {
3266 if (::stat(jpath
.c_str(), &st
) == -1) {
3267 string err
= string("journal-path: ") + jpath
;
3268 perror(err
.c_str());
3271 if (S_ISDIR(st
.st_mode
)) {
3272 cerr
<< "journal-path: " << jpath
<< ": "
3273 << cpp_strerror(EISDIR
) << std::endl
;
3278 ObjectStore
*fs
= ObjectStore::create(g_ceph_context
, type
, dpath
, jpath
, flags
);
3280 cerr
<< "Unable to create store of type " << type
<< std::endl
;
3284 if (op
== "fsck" || op
== "fsck-deep") {
3285 int r
= fs
->fsck(op
== "fsck-deep");
3287 cerr
<< "fsck failed: " << cpp_strerror(r
) << std::endl
;
3291 cerr
<< "fsck found " << r
<< " errors" << std::endl
;
3294 cout
<< "fsck found no errors" << std::endl
;
3297 if (op
== "repair" || op
== "repair-deep") {
3298 int r
= fs
->repair(op
== "repair-deep");
3300 cerr
<< "repair failed: " << cpp_strerror(r
) << std::endl
;
3304 cerr
<< "repair found " << r
<< " errors" << std::endl
;
3307 cout
<< "repair found no errors" << std::endl
;
3311 if (fsid
.length()) {
3313 bool r
= f
.parse(fsid
.c_str());
3315 cerr
<< "failed to parse uuid '" << fsid
<< "'" << std::endl
;
3322 cerr
<< "mkfs failed: " << cpp_strerror(r
) << std::endl
;
3330 snprintf(fn
, sizeof(fn
), "%s/type", target_data_path
.c_str());
3331 int fd
= ::open(fn
, O_RDONLY
);
3333 cerr
<< "Unable to open " << target_data_path
<< "/type" << std::endl
;
3339 target_type
= string(bl
.c_str(), bl
.length() - 1); // drop \n
3342 ObjectStore
*targetfs
= ObjectStore::create(
3343 g_ceph_context
, target_type
,
3344 target_data_path
, "", 0);
3345 if (targetfs
== NULL
) {
3346 cerr
<< "Unable to open store of type " << target_type
<< std::endl
;
3349 int r
= dup(dpath
, fs
, target_data_path
, targetfs
);
3351 cerr
<< "dup failed: " << cpp_strerror(r
) << std::endl
;
3357 ObjectStore::Sequencer
*osr
= new ObjectStore::Sequencer(__func__
);
3358 int ret
= fs
->mount();
3360 if (ret
== -EBUSY
) {
3361 cerr
<< "OSD has the store locked" << std::endl
;
3363 cerr
<< "Mount failed with '" << cpp_strerror(ret
) << "'" << std::endl
;
3370 FuseStore
fuse(fs
, mountpoint
);
3371 cout
<< "mounting fuse at " << mountpoint
<< " ..." << std::endl
;
3372 int r
= fuse
.main();
3374 cerr
<< "failed to mount fuse: " << cpp_strerror(r
) << std::endl
;
3378 cerr
<< "fuse support not enabled" << std::endl
;
3384 vector
<coll_t
>::iterator it
;
3385 CompatSet supported
;
3387 #ifdef INTERNAL_TEST
3388 supported
= get_test_compat_set();
3390 supported
= OSD::get_osd_compat_set();
3394 OSDSuperblock superblock
;
3395 bufferlist::iterator p
;
3396 ret
= fs
->read(coll_t::meta(), OSD_SUPERBLOCK_GOBJECT
, 0, 0, bl
);
3398 cerr
<< "Failure to read OSD superblock: " << cpp_strerror(ret
) << std::endl
;
3403 ::decode(superblock
, p
);
3406 cerr
<< "Cluster fsid=" << superblock
.cluster_fsid
<< std::endl
;
3410 cerr
<< "Supported features: " << supported
<< std::endl
;
3411 cerr
<< "On-disk features: " << superblock
.compat_features
<< std::endl
;
3413 if (supported
.compare(superblock
.compat_features
) == -1) {
3414 CompatSet unsupported
= supported
.unsupported(superblock
.compat_features
);
3415 cerr
<< "On-disk OSD incompatible features set "
3416 << unsupported
<< std::endl
;
3421 if (op
== "apply-layout-settings") {
3422 int target_level
= 0;
3423 // Single positional argument with apply-layout-settings
3424 // for target_level.
3425 if (vm
.count("object") && isdigit(object
[0])) {
3426 target_level
= atoi(object
.c_str());
3427 // This requires --arg1 to be specified since
3428 // this is the third positional argument and normally
3429 // used with object operations.
3430 } else if (vm
.count("arg1") && isdigit(arg1
[0])) {
3431 target_level
= atoi(arg1
.c_str());
3433 ret
= apply_layout_settings(fs
, superblock
, pool
, pgid
, dry_run
, target_level
);
3437 if (op
!= "list" && vm
.count("object")) {
3438 // Special case: Create pgmeta_oid if empty string specified
3439 // This can't conflict with any actual object names.
3441 ghobj
= pgid
.make_pgmeta_oid();
3443 json_spirit::Value v
;
3445 if (!json_spirit::read(object
, v
) ||
3446 (v
.type() != json_spirit::array_type
&& v
.type() != json_spirit::obj_type
)) {
3447 // Special: Need head/snapdir so set even if user didn't specify
3448 if (vm
.count("objcmd") && (objcmd
== "remove-clone-metadata"))
3450 lookup_ghobject
lookup(object
, nspace
, head
);
3451 if (pgidstr
.length())
3452 ret
= action_on_all_objects_in_exact_pg(fs
, coll_t(pgid
), lookup
, debug
);
3454 ret
= action_on_all_objects(fs
, lookup
, debug
);
3456 throw std::runtime_error("Internal error");
3458 if (lookup
.size() != 1) {
3460 if (lookup
.size() == 0)
3461 ss
<< "No object id '" << object
<< "' found or invalid JSON specified";
3463 ss
<< "Found " << lookup
.size() << " objects with id '" << object
3464 << "', please use a JSON spec from --op list instead";
3465 throw std::runtime_error(ss
.str());
3467 pair
<coll_t
, ghobject_t
> found
= lookup
.pop();
3468 pgidstr
= found
.first
.to_str();
3469 pgid
.parse(pgidstr
.c_str());
3470 ghobj
= found
.second
;
3474 if (pgidstr
.length() == 0 && v
.type() != json_spirit::array_type
) {
3475 ss
<< "Without --pgid the object '" << object
3476 << "' must be a JSON array";
3477 throw std::runtime_error(ss
.str());
3479 if (v
.type() == json_spirit::array_type
) {
3480 json_spirit::Array array
= v
.get_array();
3481 if (array
.size() != 2) {
3482 ss
<< "Object '" << object
3483 << "' must be a JSON array with 2 elements";
3484 throw std::runtime_error(ss
.str());
3486 vector
<json_spirit::Value
>::iterator i
= array
.begin();
3487 assert(i
!= array
.end());
3488 if (i
->type() != json_spirit::str_type
) {
3489 ss
<< "Object '" << object
3490 << "' must be a JSON array with the first element a string";
3491 throw std::runtime_error(ss
.str());
3493 string object_pgidstr
= i
->get_str();
3494 if (object_pgidstr
!= "meta") {
3496 object_pgid
.parse(object_pgidstr
.c_str());
3497 if (pgidstr
.length() > 0) {
3498 if (object_pgid
!= pgid
) {
3499 ss
<< "object '" << object
3500 << "' has a pgid different from the --pgid="
3501 << pgidstr
<< " option";
3502 throw std::runtime_error(ss
.str());
3505 pgidstr
= object_pgidstr
;
3509 pgidstr
= object_pgidstr
;
3516 } catch (std::runtime_error
& e
) {
3517 ss
<< "Decode object JSON error: " << e
.what();
3518 throw std::runtime_error(ss
.str());
3520 if (pgidstr
!= "meta" && (uint64_t)pgid
.pgid
.m_pool
!= (uint64_t)ghobj
.hobj
.pool
) {
3521 cerr
<< "Object pool and pgid pool don't match" << std::endl
;
3526 } catch (std::runtime_error
& e
) {
3527 cerr
<< e
.what() << std::endl
;
3534 // The ops which require --pgid option are checked here and
3535 // mentioned in the usage for --pgid.
3536 if ((op
== "info" || op
== "log" || op
== "remove" || op
== "export"
3537 || op
== "export-remove" || op
== "rm-past-intervals"
3538 || op
== "mark-complete" || op
== "trim-pg-log") &&
3539 pgidstr
.length() == 0) {
3540 cerr
<< "Must provide pgid" << std::endl
;
3546 if (op
== "import") {
3549 ret
= tool
.do_import(fs
, superblock
, force
, pgidstr
, *osr
);
3551 catch (const buffer::error
&e
) {
3552 cerr
<< "do_import threw exception error " << e
.what() << std::endl
;
3555 if (ret
== -EFAULT
) {
3556 cerr
<< "Corrupt input for import" << std::endl
;
3559 cout
<< "Import successful" << std::endl
;
3561 } else if (op
== "dump-journal-mount") {
3562 // Undocumented feature to dump journal with mounted fs
3563 // This doesn't support the format option, but it uses the
3564 // ObjectStore::dump_journal() and mounts to get replay to run.
3565 ret
= fs
->dump_journal(cout
);
3567 if (ret
== -EOPNOTSUPP
) {
3568 cerr
<< "Object store type \"" << type
<< "\" doesn't support journal dump" << std::endl
;
3570 cerr
<< "Journal dump failed with error " << cpp_strerror(ret
) << std::endl
;
3574 } else if (op
== "get-osdmap") {
3578 epoch
= superblock
.current_epoch
;
3580 ret
= get_osdmap(fs
, epoch
, osdmap
, bl
);
3582 cerr
<< "Failed to get osdmap#" << epoch
<< ": "
3583 << cpp_strerror(ret
) << std::endl
;
3586 ret
= bl
.write_fd(file_fd
);
3588 cerr
<< "Failed to write to " << file
<< ": " << cpp_strerror(ret
) << std::endl
;
3590 cout
<< "osdmap#" << epoch
<< " exported." << std::endl
;
3593 } else if (op
== "set-osdmap") {
3595 ret
= get_fd_data(file_fd
, bl
);
3597 cerr
<< "Failed to read osdmap " << cpp_strerror(ret
) << std::endl
;
3599 ret
= set_osdmap(fs
, epoch
, bl
, force
, *osr
);
3602 } else if (op
== "get-inc-osdmap") {
3605 epoch
= superblock
.current_epoch
;
3607 ret
= get_inc_osdmap(fs
, epoch
, bl
);
3609 cerr
<< "Failed to get incremental osdmap# " << epoch
<< ": "
3610 << cpp_strerror(ret
) << std::endl
;
3613 ret
= bl
.write_fd(file_fd
);
3615 cerr
<< "Failed to write to " << file
<< ": " << cpp_strerror(ret
) << std::endl
;
3617 cout
<< "inc-osdmap#" << epoch
<< " exported." << std::endl
;
3620 } else if (op
== "set-inc-osdmap") {
3622 ret
= get_fd_data(file_fd
, bl
);
3624 cerr
<< "Failed to read incremental osdmap " << cpp_strerror(ret
) << std::endl
;
3627 ret
= set_inc_osdmap(fs
, epoch
, bl
, force
, *osr
);
3630 } else if (op
== "update-mon-db") {
3631 if (!vm
.count("mon-store-path")) {
3632 cerr
<< "Please specify the path to monitor db to update" << std::endl
;
3635 ret
= update_mon_db(*fs
, superblock
, dpath
+ "/keyring", mon_store_path
);
3640 log_oid
= OSD::make_pg_log_oid(pgid
);
3641 biginfo_oid
= OSD::make_pg_biginfo_oid(pgid
);
3643 if (op
== "remove") {
3644 if (!force
&& !dry_run
) {
3645 cerr
<< "Please use export-remove or you must use --force option" << std::endl
;
3649 ret
= initiate_new_remove_pg(fs
, pgid
, *osr
);
3651 cerr
<< "PG '" << pgid
<< "' not found" << std::endl
;
3654 cout
<< "Remove successful" << std::endl
;
3658 if (op
== "fix-lost") {
3659 boost::scoped_ptr
<action_on_object_t
> action
;
3660 action
.reset(new do_fix_lost(osr
));
3661 if (pgidstr
.length())
3662 ret
= action_on_all_objects_in_exact_pg(fs
, coll_t(pgid
), *action
, debug
);
3664 ret
= action_on_all_objects(fs
, *action
, debug
);
3669 ret
= do_list(fs
, pgidstr
, object
, nspace
, formatter
, debug
,
3670 human_readable
, head
);
3672 cerr
<< "do_list failed: " << cpp_strerror(ret
) << std::endl
;
3677 if (op
== "dump-super") {
3678 formatter
->open_object_section("superblock");
3679 superblock
.dump(formatter
);
3680 formatter
->close_section();
3681 formatter
->flush(cout
);
3686 if (op
== "meta-list") {
3687 ret
= do_meta(fs
, object
, formatter
, debug
, human_readable
);
3689 cerr
<< "do_meta failed: " << cpp_strerror(ret
) << std::endl
;
3694 ret
= fs
->list_collections(ls
);
3696 cerr
<< "failed to list pgs: " << cpp_strerror(ret
) << std::endl
;
3700 if (debug
&& op
== "list-pgs")
3701 cout
<< "Performing list-pgs operation" << std::endl
;
3704 for (it
= ls
.begin(); it
!= ls
.end(); ++it
) {
3707 if (pgidstr
== "meta") {
3708 if (it
->to_str() == "meta")
3714 if (!it
->is_pg(&tmppgid
)) {
3718 if (it
->is_temp(&tmppgid
)) {
3722 if (op
!= "list-pgs" && tmppgid
!= pgid
) {
3726 if (op
!= "list-pgs") {
3731 cout
<< tmppgid
<< std::endl
;
3734 if (op
== "list-pgs") {
3739 // If not an object command nor any of the ops handled below, then output this usage
3740 // before complaining about a bad pgid
3741 if (!vm
.count("objcmd") && op
!= "export" && op
!= "export-remove" && op
!= "info" && op
!= "log" && op
!= "rm-past-intervals" && op
!= "mark-complete" && op
!= "trim-pg-log") {
3742 cerr
<< "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, "
3743 "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, dump-import, trim-pg-log)"
3750 // The following code for export, info, log require omap or !skip-mount-omap
3751 if (it
!= ls
.end()) {
3755 if (vm
.count("objcmd")) {
3757 if (objcmd
== "remove" || objcmd
== "removeall") {
3758 bool all
= (objcmd
== "removeall");
3759 ret
= do_remove_object(fs
, coll
, ghobj
, all
, force
, *osr
);
3761 } else if (objcmd
== "list-attrs") {
3762 ret
= do_list_attrs(fs
, coll
, ghobj
);
3764 } else if (objcmd
== "list-omap") {
3765 ret
= do_list_omap(fs
, coll
, ghobj
);
3767 } else if (objcmd
== "get-bytes" || objcmd
== "set-bytes") {
3768 if (objcmd
== "get-bytes") {
3770 if (vm
.count("arg1") == 0 || arg1
== "-") {
3773 fd
= open(arg1
.c_str(), O_WRONLY
|O_TRUNC
|O_CREAT
|O_EXCL
|O_LARGEFILE
, 0666);
3775 cerr
<< "open " << arg1
<< " " << cpp_strerror(errno
) << std::endl
;
3780 ret
= do_get_bytes(fs
, coll
, ghobj
, fd
);
3781 if (fd
!= STDOUT_FILENO
)
3785 if (vm
.count("arg1") == 0 || arg1
== "-") {
3786 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3787 if (isatty(STDIN_FILENO
)) {
3788 cerr
<< "stdin is a tty and no file specified" << std::endl
;
3794 fd
= open(arg1
.c_str(), O_RDONLY
|O_LARGEFILE
, 0666);
3796 cerr
<< "open " << arg1
<< " " << cpp_strerror(errno
) << std::endl
;
3801 ret
= do_set_bytes(fs
, coll
, ghobj
, fd
, *osr
);
3802 if (fd
!= STDIN_FILENO
)
3806 } else if (objcmd
== "get-attr") {
3807 if (vm
.count("arg1") == 0) {
3812 ret
= do_get_attr(fs
, coll
, ghobj
, arg1
);
3814 } else if (objcmd
== "set-attr") {
3815 if (vm
.count("arg1") == 0) {
3821 if (vm
.count("arg2") == 0 || arg2
== "-") {
3822 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3823 if (isatty(STDIN_FILENO
)) {
3824 cerr
<< "stdin is a tty and no file specified" << std::endl
;
3830 fd
= open(arg2
.c_str(), O_RDONLY
|O_LARGEFILE
, 0666);
3832 cerr
<< "open " << arg2
<< " " << cpp_strerror(errno
) << std::endl
;
3837 ret
= do_set_attr(fs
, coll
, ghobj
, arg1
, fd
, *osr
);
3838 if (fd
!= STDIN_FILENO
)
3841 } else if (objcmd
== "rm-attr") {
3842 if (vm
.count("arg1") == 0) {
3847 ret
= do_rm_attr(fs
, coll
, ghobj
, arg1
, *osr
);
3849 } else if (objcmd
== "get-omap") {
3850 if (vm
.count("arg1") == 0) {
3855 ret
= do_get_omap(fs
, coll
, ghobj
, arg1
);
3857 } else if (objcmd
== "set-omap") {
3858 if (vm
.count("arg1") == 0) {
3864 if (vm
.count("arg2") == 0 || arg2
== "-") {
3865 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3866 if (isatty(STDIN_FILENO
)) {
3867 cerr
<< "stdin is a tty and no file specified" << std::endl
;
3873 fd
= open(arg2
.c_str(), O_RDONLY
|O_LARGEFILE
, 0666);
3875 cerr
<< "open " << arg2
<< " " << cpp_strerror(errno
) << std::endl
;
3880 ret
= do_set_omap(fs
, coll
, ghobj
, arg1
, fd
, *osr
);
3881 if (fd
!= STDIN_FILENO
)
3884 } else if (objcmd
== "rm-omap") {
3885 if (vm
.count("arg1") == 0) {
3890 ret
= do_rm_omap(fs
, coll
, ghobj
, arg1
, *osr
);
3892 } else if (objcmd
== "get-omaphdr") {
3893 if (vm
.count("arg1")) {
3898 ret
= do_get_omaphdr(fs
, coll
, ghobj
);
3900 } else if (objcmd
== "set-omaphdr") {
3902 if (vm
.count("arg2")) {
3908 if (vm
.count("arg1") == 0 || arg1
== "-") {
3909 // Since read_fd() doesn't handle ^D from a tty stdin, don't allow it.
3910 if (isatty(STDIN_FILENO
)) {
3911 cerr
<< "stdin is a tty and no file specified" << std::endl
;
3917 fd
= open(arg1
.c_str(), O_RDONLY
|O_LARGEFILE
, 0666);
3919 cerr
<< "open " << arg1
<< " " << cpp_strerror(errno
) << std::endl
;
3924 ret
= do_set_omaphdr(fs
, coll
, ghobj
, fd
, *osr
);
3925 if (fd
!= STDIN_FILENO
)
3928 } else if (objcmd
== "dump") {
3929 // There should not be any other arguments
3930 if (vm
.count("arg1") || vm
.count("arg2")) {
3935 ret
= print_obj_info(fs
, coll
, ghobj
, formatter
);
3937 } else if (objcmd
== "corrupt-info") { // Undocumented testing feature
3938 // There should not be any other arguments
3939 if (vm
.count("arg1") || vm
.count("arg2")) {
3944 ret
= corrupt_info(fs
, coll
, ghobj
, formatter
, *osr
);
3946 } else if (objcmd
== "set-size" || objcmd
== "corrupt-size") {
3947 // Undocumented testing feature
3948 bool corrupt
= (objcmd
== "corrupt-size");
3950 if (vm
.count("arg1") == 0 || vm
.count("arg2")) {
3955 if (arg1
.length() == 0 || !isdigit(arg1
.c_str()[0])) {
3956 cerr
<< "Invalid size '" << arg1
<< "' specified" << std::endl
;
3960 uint64_t size
= atoll(arg1
.c_str());
3961 ret
= set_size(fs
, coll
, ghobj
, size
, formatter
, *osr
, corrupt
);
3963 } else if (objcmd
== "clear-snapset") {
3964 // UNDOCUMENTED: For testing zap SnapSet
3965 // IGNORE extra args since not in usage anyway
3966 if (!ghobj
.hobj
.has_snapset()) {
3967 cerr
<< "'" << objcmd
<< "' requires a head or snapdir object" << std::endl
;
3971 ret
= clear_snapset(fs
, coll
, ghobj
, arg1
, *osr
);
3973 } else if (objcmd
== "remove-clone-metadata") {
3975 if (vm
.count("arg1") == 0 || vm
.count("arg2")) {
3980 if (!ghobj
.hobj
.has_snapset()) {
3981 cerr
<< "'" << objcmd
<< "' requires a head or snapdir object" << std::endl
;
3985 if (arg1
.length() == 0 || !isdigit(arg1
.c_str()[0])) {
3986 cerr
<< "Invalid cloneid '" << arg1
<< "' specified" << std::endl
;
3990 snapid_t cloneid
= atoi(arg1
.c_str());
3991 ret
= remove_clone(fs
, coll
, ghobj
, cloneid
, force
, *osr
);
3994 cerr
<< "Unknown object command '" << objcmd
<< "'" << std::endl
;
4002 ret
= PG::peek_map_epoch(fs
, pgid
, &map_epoch
, &bl
);
4004 cerr
<< "peek_map_epoch reports error" << std::endl
;
4006 cerr
<< "map_epoch " << map_epoch
<< std::endl
;
4008 pg_info_t
info(pgid
);
4009 PastIntervals past_intervals
;
4011 ret
= PG::read_info(fs
, pgid
, coll
, bl
, info
, past_intervals
,
4014 cerr
<< "read_info error " << cpp_strerror(ret
) << std::endl
;
4017 if (struct_ver
< PG::compat_struct_v
) {
4018 cerr
<< "PG is too old to upgrade, use older Ceph version" << std::endl
;
4023 cerr
<< "struct_v " << (int)struct_ver
<< std::endl
;
4025 if (op
== "export" || op
== "export-remove") {
4026 ret
= tool
.do_export(fs
, coll
, pgid
, info
, map_epoch
, struct_ver
, superblock
, past_intervals
);
4028 cerr
<< "Export successful" << std::endl
;
4029 if (op
== "export-remove") {
4030 ret
= initiate_new_remove_pg(fs
, pgid
, *osr
);
4031 // Export succeeded, so pgid is there
4033 cerr
<< "Remove successful" << std::endl
;
4036 } else if (op
== "info") {
4037 formatter
->open_object_section("info");
4038 info
.dump(formatter
);
4039 formatter
->close_section();
4040 formatter
->flush(cout
);
4042 } else if (op
== "log") {
4043 PGLog::IndexedLog log
;
4044 pg_missing_t missing
;
4045 ret
= get_log(fs
, struct_ver
, coll
, pgid
, info
, log
, missing
);
4049 dump_log(formatter
, cout
, log
, missing
);
4050 } else if (op
== "rm-past-intervals") {
4051 ObjectStore::Transaction tran
;
4052 ObjectStore::Transaction
*t
= &tran
;
4054 if (struct_ver
< PG::compat_struct_v
) {
4055 cerr
<< "Can't remove past-intervals, version mismatch " << (int)struct_ver
4056 << " (pg) < compat " << (int)PG::compat_struct_v
<< " (tool)"
4062 cout
<< "Remove past-intervals " << past_intervals
<< std::endl
;
4064 past_intervals
.clear();
4069 ret
= write_info(*t
, map_epoch
, info
, past_intervals
);
4072 fs
->apply_transaction(osr
, std::move(*t
));
4073 cout
<< "Removal succeeded" << std::endl
;
4075 } else if (op
== "mark-complete") {
4076 ObjectStore::Transaction tran
;
4077 ObjectStore::Transaction
*t
= &tran
;
4079 if (struct_ver
< PG::compat_struct_v
) {
4080 cerr
<< "Can't mark-complete, version mismatch " << (int)struct_ver
4081 << " (pg) < compat " << (int)PG::compat_struct_v
<< " (tool)"
4087 cout
<< "Marking complete " << std::endl
;
4089 info
.last_update
= eversion_t(superblock
.current_epoch
, info
.last_update
.version
+ 1);
4090 info
.last_backfill
= hobject_t::get_max();
4091 info
.last_epoch_started
= superblock
.current_epoch
;
4092 info
.history
.last_epoch_started
= superblock
.current_epoch
;
4093 info
.history
.last_epoch_clean
= superblock
.current_epoch
;
4094 past_intervals
.clear();
4097 ret
= write_info(*t
, map_epoch
, info
, past_intervals
);
4100 fs
->apply_transaction(osr
, std::move(*t
));
4102 cout
<< "Marking complete succeeded" << std::endl
;
4103 } else if (op
== "trim-pg-log") {
4104 ret
= do_trim_pg_log(fs
, coll
, info
, pgid
, *osr
,
4105 map_epoch
, past_intervals
);
4107 cerr
<< "Error trimming pg log: " << cpp_strerror(ret
) << std::endl
;
4110 cout
<< "Finished trimming pg log" << std::endl
;
4113 assert(!"Should have already checked for valid --op");
4116 cerr
<< "PG '" << pgid
<< "' not found" << std::endl
;
4121 int r
= fs
->umount();
4124 cerr
<< "umount failed: " << cpp_strerror(r
) << std::endl
;
4125 // If no previous error, then use umount() error
4131 // Export output can go to stdout, so put this message on stderr
4133 cerr
<< "dry-run: Nothing changed" << std::endl
;
4135 cout
<< "dry-run: Nothing changed" << std::endl
;