#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>
+#include <boost/algorithm/string.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/optional.hpp>
+#include <fstream>
#include <stdlib.h>
#include "include/compat.h"
#include "include/util.h"
+using namespace std;
namespace po = boost::program_options;
#ifdef INTERNAL_TEST
int _action_on_all_objects_in_pg(ObjectStore *store, coll_t coll, action_on_object_t &action, bool debug)
{
auto ch = store->open_collection(coll);
+
unsigned LIST_AT_A_TIME = 100;
ghobject_t next;
while (!next.is_max()) {
for (vector<ghobject_t>::iterator obj = list.begin();
obj != list.end();
++obj) {
- if (obj->is_pgmeta())
- continue;
object_info_t oi;
if (coll != coll_t::meta()) {
bufferlist attr;
int r = 0;
vector<coll_t> colls_to_check;
vector<coll_t> candidates;
+
r = store->list_collections(candidates);
if (r < 0) {
cerr << "Error listing collections: " << cpp_strerror(r) << std::endl;
i != candidates.end();
++i) {
spg_t cand_pgid;
+ if (i->is_meta() && pgidstr == "meta") {
+ colls_to_check.push_back(*i);
+ continue;
+ }
if (!i->is_pg(&cand_pgid))
continue;
return 0;
}
-int get_log(ObjectStore *fs, __u8 struct_ver,
+int get_log(CephContext *cct, ObjectStore *fs, __u8 struct_ver,
spg_t pgid, const pg_info_t &info,
PGLog::IndexedLog &log, pg_missing_t &missing)
{
ostringstream oss;
ceph_assert(struct_ver > 0);
PGLog::read_log_and_missing(
- fs, ch,
+ cct, fs, ch,
pgid.make_pgmeta_oid(),
info, log, missing,
oss,
int ret = write_info(t, epoch, info, past_intervals);
if (ret)
return ret;
+
coll_t coll(info.pgid);
map<string,bufferlist> km;
-
+ const bool require_rollback = !info.pgid.is_no_shard();
if (!divergent.empty()) {
ceph_assert(missing.get_items().empty());
PGLog::write_log_and_missing_wo_missing(
- t, &km, log, coll, info.pgid.make_pgmeta_oid(), divergent, true);
+ t, &km, log, coll, info.pgid.make_pgmeta_oid(), divergent,
+ require_rollback);
} else {
pg_missing_tracker_t tmissing(missing);
bool rebuilt_missing_set_with_deletes = missing.may_include_deletes;
PGLog::write_log_and_missing(
- t, &km, log, coll, info.pgid.make_pgmeta_oid(), tmissing, true,
+ t, &km, log, coll, info.pgid.make_pgmeta_oid(), tmissing,
+ require_rollback,
&rebuilt_missing_set_with_deletes);
}
t.omap_setkeys(coll, info.pgid.make_pgmeta_oid(), km);
try {
e.decode_with_checksum(bp);
} catch (const buffer::error &e) {
- cerr << "Error reading pg log entry: " << e << std::endl;
+ cerr << "Error reading pg log entry: " << e.what() << std::endl;
}
if (debug) {
cerr << "read entry " << e << std::endl;
return 0;
}
// Trim the pg log "dup" entries stored in the pgmeta object's omap down to
// the configured limit (osd_pg_log_dups_tracked), deleting the oldest keys
// in chunks of at most osd_pg_log_trim_max keys per transaction.
//
// Dup entries live under omap keys prefixed "dup_"; because omap iteration
// is key-ordered, a sliding window of the newest max_dup_entries keys is
// maintained in keys_to_keep while everything older spills into
// keys_to_trim for deletion.
//
// NOTE(review): info, map_epoch and past_intervals are unused here — the
// signature presumably mirrors do_trim_pg_log; confirm before changing.
// Relies on the file-scope globals dry_run and g_ceph_context.
// Returns 0; asserts if the pgmeta object cannot be stat'ed or carries
// byte data.
int do_trim_pg_log_dups(ObjectStore *store, const coll_t &coll,
                        pg_info_t &info, const spg_t &pgid,
                        epoch_t map_epoch,
                        PastIntervals &past_intervals)
{
  ghobject_t oid = pgid.make_pgmeta_oid();
  struct stat st;
  auto ch = store->open_collection(coll);
  int r = store->stat(ch, oid, &st);
  ceph_assert(r == 0);
  // the pgmeta object keeps all of its state in omap; no object data expected
  ceph_assert(st.st_size == 0);

  const size_t max_dup_entries = g_ceph_context->_conf->osd_pg_log_dups_tracked;
  ceph_assert(max_dup_entries > 0);
  const size_t max_chunk_size = g_ceph_context->_conf->osd_pg_log_trim_max;
  ceph_assert(max_chunk_size > 0);

  cout << "max_dup_entries=" << max_dup_entries
       << " max_chunk_size=" << max_chunk_size << std::endl;
  if (dry_run) {
    // in dry-run mode nothing is ever deleted, so each pass re-finds a full
    // chunk and the do/while below cannot terminate on its own
    cout << "Dry run enabled, so when many chunks are needed,"
         << " the trimming will never stop!" << std::endl;
  }

  set<string> keys_to_keep;
  size_t num_removed = 0;
  do {
    set<string> keys_to_trim;
    {
      ObjectMap::ObjectMapIterator p = store->get_omap_iterator(ch, oid);
      if (!p)
        break;
      for (p->seek_to_first(); p->valid(); p->next()) {
        // skip every non-dup omap key (log entries, header/meta keys,
        // missing set, etc.) — only "dup_*" keys are candidates
        if (p->key()[0] == '_')
          continue;
        if (p->key() == "can_rollback_to")
          continue;
        if (p->key() == "divergent_priors")
          continue;
        if (p->key() == "rollback_info_trimmed_to")
          continue;
        if (p->key() == "may_include_deletes_in_missing")
          continue;
        if (p->key().substr(0, 7) == string("missing"))
          continue;
        if (p->key().substr(0, 4) != string("dup_"))
          continue;
        keys_to_keep.insert(p->key());
        // once the keep-window overflows, evict its oldest (smallest) key
        // into the trim set
        if (keys_to_keep.size() > max_dup_entries) {
          auto oldest_to_keep = keys_to_keep.begin();
          keys_to_trim.emplace(*oldest_to_keep);
          keys_to_keep.erase(oldest_to_keep);
        }
        // bound the amount of work done in one transaction
        if (keys_to_trim.size() >= max_chunk_size) {
          break;
        }
      }
    } // deconstruct ObjectMapIterator
    // delete the keys
    num_removed = keys_to_trim.size();
    if (!dry_run && !keys_to_trim.empty()) {
      cout << "Removing keys " << *keys_to_trim.begin() << " - " << *keys_to_trim.rbegin() << std::endl;
      ObjectStore::Transaction t;
      t.omap_rmkeys(coll, oid, keys_to_trim);
      store->queue_transaction(ch, std::move(t));
      ch->flush();
    }
    // a full chunk means there may be more to trim; a short chunk means the
    // iterator reached the keep-window and we are done
  } while (num_removed == max_chunk_size);

  // compact the db since we just removed a bunch of data
  cerr << "Finished trimming, now compacting..." << std::endl;
  if (!dry_run)
    store->compact();
  return 0;
}
+
const int OMAP_BATCH_SIZE = 25;
void get_omap_batch(ObjectMap::ObjectMapIterator &iter, map<string, bufferlist> &oset)
{
}
//Handle attrs for this object
- map<string,bufferptr> aset;
+ map<string,bufferptr,less<>> aset;
ret = store->getattrs(ch, obj, aset);
if (ret) return ret;
attr_section as(aset);
return get_osdmap(store, ms.map_epoch, ms.osdmap, ms.osdmap_bl);
}
-int ObjectStoreTool::do_export(ObjectStore *fs, coll_t coll, spg_t pgid,
+int ObjectStoreTool::do_export(
+ CephContext *cct, ObjectStore *fs, coll_t coll, spg_t pgid,
pg_info_t &info, epoch_t map_epoch, __u8 struct_ver,
const OSDSuperblock& superblock,
PastIntervals &past_intervals)
cerr << "Exporting " << pgid << " info " << info << std::endl;
- int ret = get_log(fs, struct_ver, pgid, info, log, missing);
+ int ret = get_log(cct, fs, struct_ver, pgid, info, log, missing);
if (ret > 0)
return ret;
int do_list_attrs(ObjectStore *store, coll_t coll, ghobject_t &ghobj)
{
auto ch = store->open_collection(coll);
- map<string,bufferptr> aset;
+ map<string,bufferptr,less<>> aset;
int r = store->getattrs(ch, ghobj, aset);
if (r < 0) {
cerr << "getattrs: " << cpp_strerror(r) << std::endl;
ObjectStore::Transaction t;
t.touch(cid, oid);
- map<string,bufferptr> attrs;
+ map<string,bufferptr,less<>> attrs;
src->getattrs(ch, oid, attrs);
if (!attrs.empty()) {
t.setattrs(cid, oid, attrs);
return r;
}
+
+const int ceph_entity_name_type(const string name)
+{
+ if (name == "mds") return CEPH_ENTITY_TYPE_MDS;
+ if (name == "osd") return CEPH_ENTITY_TYPE_OSD;
+ if (name == "mon") return CEPH_ENTITY_TYPE_MON;
+ if (name == "client") return CEPH_ENTITY_TYPE_CLIENT;
+ if (name == "mgr") return CEPH_ENTITY_TYPE_MGR;
+ if (name == "auth") return CEPH_ENTITY_TYPE_AUTH;
+ return -1;
+}
+
+eversion_t get_eversion_from_str(const string& s) {
+ eversion_t e;
+ vector<string> result;
+ boost::split(result, s, boost::is_any_of("'"));
+ if (result.size() != 2) {
+ cerr << "eversion_t: invalid format: '" << s << "'" << std::endl;
+ return e;
+ }
+ e.epoch = atoi(result[0].c_str());
+ e.version = atoi(result[1].c_str());
+ return e;
+}
+
+osd_reqid_t get_reqid_from_str(const string& s) {
+ osd_reqid_t reqid;
+
+ vector<string> result;
+ boost::split(result, s, boost::is_any_of(".:"));
+ if (result.size() != 4) {
+ cerr << "reqid: invalid format " << s << std::endl;
+ return osd_reqid_t();
+ }
+ reqid.name._type = ceph_entity_name_type(result[0]);
+ reqid.name._num = atoi(result[1].c_str());
+
+ reqid.inc = atoi(result[2].c_str());
+ reqid.tid = atoi(result[3].c_str());
+ return reqid;
+}
+
+void do_dups_inject_transction(ObjectStore *store, spg_t r_pgid, map<string,bufferlist> *new_dups)
+{
+ ObjectStore::Transaction t;
+ coll_t coll(r_pgid);
+ cerr << "injecting dups into pgid:" << r_pgid << " num of dups:" << new_dups->size() << std::endl;
+ t.omap_setkeys(coll, r_pgid.make_pgmeta_oid(), (*new_dups));
+ auto ch = store->open_collection(coll);
+ store->queue_transaction(ch, std::move(t));
+ new_dups->clear();
+}
+
+int do_dups_inject_object(ObjectStore *store, spg_t r_pgid, json_spirit::mObject &in_json_obj,
+ map<string,bufferlist> *new_dups, bool debug) {
+ std::map<std::string, json_spirit::mValue>::const_iterator it = in_json_obj.find("generate");
+ int32_t generate = 0;
+ if (it != in_json_obj.end()) {
+ generate = atoi(it->second.get_str().c_str());
+ }
+
+ it = in_json_obj.find("reqid");
+ if (it == in_json_obj.end()) {
+ return 1;
+ }
+ osd_reqid_t reqid(get_reqid_from_str(it->second.get_str()));
+ it = in_json_obj.find("version");
+ if (it == in_json_obj.end()) {
+ return 1;
+ }
+ eversion_t version(get_eversion_from_str(it->second.get_str()));
+ it = in_json_obj.find("user_version");
+ if (it == in_json_obj.end()) {
+ return 1;
+ }
+ version_t user_version = atoi(it->second.get_str().c_str());
+ it = in_json_obj.find("return_code");
+ if (it == in_json_obj.end()) {
+ return 1;
+ }
+ int32_t return_code = atoi(it->second.get_str().c_str());
+ if (generate) {
+ for(auto i = 0; i < generate; ++i) {
+ version.version++;
+ if (debug) {
+ cout << "generate dups reqid " << reqid << " v=" << version << std::endl;
+ }
+ pg_log_dup_t tmp(version, user_version, reqid, return_code);
+ bufferlist bl;
+ encode(tmp, bl);
+ (*new_dups)[tmp.get_key_name()] = std::move(bl);
+ if ( new_dups->size() > 50000 ) {
+ do_dups_inject_transction(store, r_pgid, new_dups);
+ cout << "inject of " << i << " dups into pgid:" << r_pgid << " done..." << std::endl;
+ }
+ }
+ return 0;
+ } else {
+ pg_log_dup_t tmp(version, user_version, reqid, return_code);
+ if (debug) {
+ cout << "adding dup: " << tmp << "into key:" << tmp.get_key_name() << std::endl;
+ }
+ bufferlist bl;
+ encode(tmp, bl);
+ (*new_dups)[tmp.get_key_name()] = std::move(bl);
+ }
+ return 0;
+}
+
+void do_dups_inject_from_json(ObjectStore *store, spg_t r_pgid, json_spirit::mValue &inJson, bool debug)
+{
+ map<string,bufferlist> new_dups;
+ const vector<json_spirit::mValue>& o = inJson.get_array();
+ for (const auto& obj : o) {
+ if (obj.type() == json_spirit::obj_type) {
+ json_spirit::mObject Mobj = obj.get_obj();
+ do_dups_inject_object(store, r_pgid, Mobj, &new_dups, debug);
+ } else {
+ throw std::runtime_error("JSON array/object not allowed type:" + std::to_string(obj.type()));
+ return;
+ }
+ }
+ if (new_dups.size() > 0) {
+ do_dups_inject_transction(store, r_pgid, &new_dups);
+ }
+
+
+ return ;
+}
+
void usage(po::options_description &desc)
{
cerr << std::endl;
("journal-path", po::value<string>(&jpath),
"path to journal, use if tool can't find it")
("pgid", po::value<string>(&pgidstr),
- "PG id, mandatory for info, log, remove, export, export-remove, mark-complete, trim-pg-log, and mandatory for apply-layout-settings if --pool is not specified")
+ "PG id, mandatory for info, log, remove, export, export-remove, mark-complete, trim-pg-log, trim-pg-log-dups and mandatory for apply-layout-settings if --pool is not specified")
("pool", po::value<string>(&pool),
"Pool name, mandatory for apply-layout-settings if --pgid is not specified")
("op", po::value<string>(&op),
"Arg is one of [info, log, remove, mkfs, fsck, repair, fuse, dup, export, export-remove, import, list, list-slow-omap, fix-lost, list-pgs, dump-journal, dump-super, meta-list, "
- "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, apply-layout-settings, update-mon-db, dump-export, trim-pg-log, statfs]")
+ "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, apply-layout-settings, update-mon-db, dump-export, trim-pg-log, trim-pg-log-dups statfs]")
("epoch", po::value<unsigned>(&epoch),
"epoch# for get-osdmap and get-inc-osdmap, the current epoch in use if not specified")
("file", po::value<string>(&file),
} else {
file_fd = open(file.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0666);
}
- } else if (op == "import" || op == "dump-export" || op == "set-osdmap" || op == "set-inc-osdmap") {
+ } else if (op == "import" || op == "dump-export" || op == "set-osdmap" || op == "set-inc-osdmap" || op == "pg-log-inject-dups") {
if (!vm.count("file") || file == "-") {
if (isatty(STDIN_FILENO)) {
cerr << "stdin is a tty and no --file filename specified" << std::endl;
return 1;
}
- if (pgidstr.length() && !pgid.parse(pgidstr.c_str())) {
+ if (pgidstr.length() && pgidstr != "meta" && !pgid.parse(pgidstr.c_str())) {
cerr << "Invalid pgid '" << pgidstr << "' specified" << std::endl;
return 1;
}
}
}
- ObjectStore *fs = ObjectStore::create(g_ceph_context, type, dpath, jpath, flags);
- if (fs == NULL) {
+ std::unique_ptr<ObjectStore> fs = ObjectStore::create(g_ceph_context, type, dpath, jpath, flags);
+ if (!fs) {
cerr << "Unable to create store of type " << type << std::endl;
return 1;
}
target_type = string(bl.c_str(), bl.length() - 1); // drop \n
}
::close(fd);
- ObjectStore *targetfs = ObjectStore::create(
+ unique_ptr<ObjectStore> targetfs = ObjectStore::create(
g_ceph_context, target_type,
target_data_path, "", 0);
- if (targetfs == NULL) {
+ if (!targetfs) {
cerr << "Unable to open store of type " << target_type << std::endl;
return 1;
}
- int r = dup(dpath, fs, target_data_path, targetfs);
+ int r = dup(dpath, fs.get(), target_data_path, targetfs.get());
if (r < 0) {
cerr << "dup failed: " << cpp_strerror(r) << std::endl;
return 1;
if (op == "fuse") {
#ifdef HAVE_LIBFUSE
- FuseStore fuse(fs, mountpoint);
+ FuseStore fuse(fs.get(), mountpoint);
cout << "mounting fuse at " << mountpoint << " ..." << std::endl;
int r = fuse.main();
+ fs->umount();
if (r < 0) {
cerr << "failed to mount fuse: " << cpp_strerror(r) << std::endl;
return 1;
target_level = atoi(arg1.c_str());
}
ceph_assert(superblock != nullptr);
- ret = apply_layout_settings(fs, *superblock, pool, pgid, dry_run, target_level);
+ ret = apply_layout_settings(fs.get(), *superblock, pool, pgid, dry_run, target_level);
goto out;
}
if (vm.count("objcmd") && (objcmd == "remove-clone-metadata"))
head = true;
lookup_ghobject lookup(object, nspace, head);
- if (pgidstr.length())
- ret = action_on_all_objects_in_exact_pg(fs, coll_t(pgid), lookup, debug);
+ if (pgidstr == "meta")
+ ret = action_on_all_objects_in_exact_pg(fs.get(), coll_t::meta(), lookup, debug);
+ else if (pgidstr.length())
+ ret = action_on_all_objects_in_exact_pg(fs.get(), coll_t(pgid), lookup, debug);
else
- ret = action_on_all_objects(fs, lookup, debug);
+ ret = action_on_all_objects(fs.get(), lookup, debug);
if (ret) {
throw std::runtime_error("Internal error");
} else {
if ((op == "info" || op == "log" || op == "remove" || op == "export"
|| op == "export-remove" || op == "mark-complete"
|| op == "reset-last-complete"
- || op == "trim-pg-log") &&
+ || op == "trim-pg-log"
+ || op == "pg-log-inject-dups") &&
pgidstr.length() == 0) {
cerr << "Must provide pgid" << std::endl;
usage(desc);
if (op == "import") {
ceph_assert(superblock != nullptr);
try {
- ret = tool.do_import(fs, *superblock, force, pgidstr);
+ ret = tool.do_import(fs.get(), *superblock, force, pgidstr);
}
catch (const buffer::error &e) {
cerr << "do_import threw exception error " << e.what() << std::endl;
ceph_assert(superblock != nullptr);
epoch = superblock->current_epoch;
}
- ret = get_osdmap(fs, epoch, osdmap, bl);
+ ret = get_osdmap(fs.get(), epoch, osdmap, bl);
if (ret) {
cerr << "Failed to get osdmap#" << epoch << ": "
<< cpp_strerror(ret) << std::endl;
if (ret < 0) {
cerr << "Failed to read osdmap " << cpp_strerror(ret) << std::endl;
} else {
- ret = set_osdmap(fs, epoch, bl, force);
+ ret = set_osdmap(fs.get(), epoch, bl, force);
}
goto out;
} else if (op == "get-inc-osdmap") {
ceph_assert(superblock != nullptr);
epoch = superblock->current_epoch;
}
- ret = get_inc_osdmap(fs, epoch, bl);
+ ret = get_inc_osdmap(fs.get(), epoch, bl);
if (ret < 0) {
cerr << "Failed to get incremental osdmap# " << epoch << ": "
<< cpp_strerror(ret) << std::endl;
cerr << "Failed to read incremental osdmap " << cpp_strerror(ret) << std::endl;
goto out;
} else {
- ret = set_inc_osdmap(fs, epoch, bl, force);
+ ret = set_inc_osdmap(fs.get(), epoch, bl, force);
}
goto out;
} else if (op == "update-mon-db") {
ret = -EINVAL;
goto out;
}
- ret = initiate_new_remove_pg(fs, pgid);
+ ret = initiate_new_remove_pg(fs.get(), pgid);
if (ret < 0) {
cerr << "PG '" << pgid << "' not found" << std::endl;
goto out;
boost::scoped_ptr<action_on_object_t> action;
action.reset(new do_fix_lost());
if (pgidstr.length())
- ret = action_on_all_objects_in_exact_pg(fs, coll_t(pgid), *action, debug);
+ ret = action_on_all_objects_in_exact_pg(fs.get(), coll_t(pgid), *action, debug);
else
- ret = action_on_all_objects(fs, *action, debug);
+ ret = action_on_all_objects(fs.get(), *action, debug);
goto out;
}
if (op == "list") {
- ret = do_list(fs, pgidstr, object, nspace, formatter, debug,
+ ret = do_list(fs.get(), pgidstr, object, nspace, formatter, debug,
human_readable, head);
if (ret < 0) {
cerr << "do_list failed: " << cpp_strerror(ret) << std::endl;
goto out;
}
if (op == "list-slow-omap") {
- ret = do_list_slow(fs, pgidstr, object, slow_threshold, formatter, debug,
+ ret = do_list_slow(fs.get(), pgidstr, object, slow_threshold, formatter, debug,
human_readable);
if (ret < 0) {
cerr << "do_list failed: " << cpp_strerror(ret) << std::endl;
}
if (op == "meta-list") {
- ret = do_meta(fs, object, formatter, debug, human_readable);
+ ret = do_meta(fs.get(), object, formatter, debug, human_readable);
if (ret < 0) {
cerr << "do_meta failed: " << cpp_strerror(ret) << std::endl;
}
// If not an object command nor any of the ops handled below, then output this usage
// before complaining about a bad pgid
- if (!vm.count("objcmd") && op != "export" && op != "export-remove" && op != "info" && op != "log" && op != "mark-complete" && op != "trim-pg-log") {
+ if (!vm.count("objcmd") && op != "export" && op != "export-remove" && op != "info" && op != "log" && op != "mark-complete" && op != "trim-pg-log" && op != "trim-pg-log-dups" && op != "pg-log-inject-dups") {
cerr << "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, "
- "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log, statfs)"
+ "get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log, trim-pg-log-dups statfs)"
<< std::endl;
usage(desc);
ret = 1;
type = NOSNAPMAP;
else if (rmtypestr == "snapmap")
type = SNAPMAP;
- ret = do_remove_object(fs, coll, ghobj, all, force, type);
+ ret = do_remove_object(fs.get(), coll, ghobj, all, force, type);
goto out;
} else if (objcmd == "list-attrs") {
- ret = do_list_attrs(fs, coll, ghobj);
+ ret = do_list_attrs(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "list-omap") {
- ret = do_list_omap(fs, coll, ghobj);
+ ret = do_list_omap(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "get-bytes" || objcmd == "set-bytes") {
if (objcmd == "get-bytes") {
goto out;
}
}
- ret = do_get_bytes(fs, coll, ghobj, fd);
+ ret = do_get_bytes(fs.get(), coll, ghobj, fd);
if (fd != STDOUT_FILENO)
close(fd);
} else {
goto out;
}
}
- ret = do_set_bytes(fs, coll, ghobj, fd);
+ ret = do_set_bytes(fs.get(), coll, ghobj, fd);
if (fd != STDIN_FILENO)
close(fd);
}
ret = 1;
goto out;
}
- ret = do_get_attr(fs, coll, ghobj, arg1);
+ ret = do_get_attr(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "set-attr") {
if (vm.count("arg1") == 0) {
goto out;
}
}
- ret = do_set_attr(fs, coll, ghobj, arg1, fd);
+ ret = do_set_attr(fs.get(), coll, ghobj, arg1, fd);
if (fd != STDIN_FILENO)
close(fd);
goto out;
ret = 1;
goto out;
}
- ret = do_rm_attr(fs, coll, ghobj, arg1);
+ ret = do_rm_attr(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "get-omap") {
if (vm.count("arg1") == 0) {
ret = 1;
goto out;
}
- ret = do_get_omap(fs, coll, ghobj, arg1);
+ ret = do_get_omap(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "set-omap") {
if (vm.count("arg1") == 0) {
goto out;
}
}
- ret = do_set_omap(fs, coll, ghobj, arg1, fd);
+ ret = do_set_omap(fs.get(), coll, ghobj, arg1, fd);
if (fd != STDIN_FILENO)
close(fd);
goto out;
ret = 1;
goto out;
}
- ret = do_rm_omap(fs, coll, ghobj, arg1);
+ ret = do_rm_omap(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "get-omaphdr") {
if (vm.count("arg1")) {
ret = 1;
goto out;
}
- ret = do_get_omaphdr(fs, coll, ghobj);
+ ret = do_get_omaphdr(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "set-omaphdr") {
// Extra arg
goto out;
}
}
- ret = do_set_omaphdr(fs, coll, ghobj, fd);
+ ret = do_set_omaphdr(fs.get(), coll, ghobj, fd);
if (fd != STDIN_FILENO)
close(fd);
goto out;
ret = 1;
goto out;
}
- ret = print_obj_info(fs, coll, ghobj, formatter);
+ ret = print_obj_info(fs.get(), coll, ghobj, formatter);
goto out;
} else if (objcmd == "corrupt-info") { // Undocumented testing feature
// There should not be any other arguments
ret = 1;
goto out;
}
- ret = corrupt_info(fs, coll, ghobj, formatter);
+ ret = corrupt_info(fs.get(), coll, ghobj, formatter);
goto out;
} else if (objcmd == "set-size" || objcmd == "corrupt-size") {
// Undocumented testing feature
goto out;
}
uint64_t size = atoll(arg1.c_str());
- ret = set_size(fs, coll, ghobj, size, formatter, corrupt);
+ ret = set_size(fs.get(), coll, ghobj, size, formatter, corrupt);
goto out;
} else if (objcmd == "clear-data-digest") {
- ret = clear_data_digest(fs, coll, ghobj);
+ ret = clear_data_digest(fs.get(), coll, ghobj);
goto out;
} else if (objcmd == "clear-snapset") {
// UNDOCUMENTED: For testing zap SnapSet
ret = 1;
goto out;
}
- ret = clear_snapset(fs, coll, ghobj, arg1);
+ ret = clear_snapset(fs.get(), coll, ghobj, arg1);
goto out;
} else if (objcmd == "remove-clone-metadata") {
// Extra arg
goto out;
}
snapid_t cloneid = atoi(arg1.c_str());
- ret = remove_clone(fs, coll, ghobj, cloneid, force);
+ ret = remove_clone(fs.get(), coll, ghobj, cloneid, force);
goto out;
}
cerr << "Unknown object command '" << objcmd << "'" << std::endl;
}
map_epoch = 0;
- ret = PG::peek_map_epoch(fs, pgid, &map_epoch);
+ ret = PG::peek_map_epoch(fs.get(), pgid, &map_epoch);
if (ret < 0)
cerr << "peek_map_epoch reports error" << std::endl;
if (debug)
pg_info_t info(pgid);
PastIntervals past_intervals;
__u8 struct_ver;
- ret = PG::read_info(fs, pgid, coll, info, past_intervals, struct_ver);
+ ret = PG::read_info(fs.get(), pgid, coll, info, past_intervals, struct_ver);
if (ret < 0) {
cerr << "read_info error " << cpp_strerror(ret) << std::endl;
goto out;
if (op == "export" || op == "export-remove") {
ceph_assert(superblock != nullptr);
- ret = tool.do_export(fs, coll, pgid, info, map_epoch, struct_ver, *superblock, past_intervals);
+ ret = tool.do_export(cct.get(), fs.get(), coll, pgid, info, map_epoch, struct_ver, *superblock, past_intervals);
if (ret == 0) {
cerr << "Export successful" << std::endl;
if (op == "export-remove") {
- ret = initiate_new_remove_pg(fs, pgid);
+ ret = initiate_new_remove_pg(fs.get(), pgid);
// Export succeeded, so pgid is there
ceph_assert(ret == 0);
cerr << "Remove successful" << std::endl;
} else if (op == "log") {
PGLog::IndexedLog log;
pg_missing_t missing;
- ret = get_log(fs, struct_ver, pgid, info, log, missing);
+ ret = get_log(cct.get(), fs.get(), struct_ver, pgid, info, log, missing);
if (ret < 0)
goto out;
}
cout << "Marking complete succeeded" << std::endl;
} else if (op == "trim-pg-log") {
- ret = do_trim_pg_log(fs, coll, info, pgid,
+ ret = do_trim_pg_log(fs.get(), coll, info, pgid,
map_epoch, past_intervals);
if (ret < 0) {
cerr << "Error trimming pg log: " << cpp_strerror(ret) << std::endl;
}
cout << "Finished trimming pg log" << std::endl;
goto out;
+ } else if (op == "trim-pg-log-dups") {
+ ret = do_trim_pg_log_dups(fs.get(), coll, info, pgid,
+ map_epoch, past_intervals);
+ if (ret < 0) {
+ cerr << "Error trimming pg log dups: " << cpp_strerror(ret) << std::endl;
+ goto out;
+ }
+ cout << "Finished trimming pg log dups" << std::endl;
+ goto out;
} else if (op == "reset-last-complete") {
if (!force) {
std::cerr << "WARNING: reset-last-complete is extremely dangerous and almost "
}
cout << "Reseting last_complete succeeded" << std::endl;
+ } else if (op == "pg-log-inject-dups") {
+ if (!vm.count("file") || file == "-") {
+ cerr << "Must provide file containing JSON dups entries" << std::endl;
+ ret = 1;
+ goto out;
+ }
+ if (debug)
+ cerr << "opening file " << file << std::endl;
+
+ ifstream json_file_stream(file , std::ifstream::in);
+ if (!json_file_stream.is_open()) {
+ cerr << "unable to open file " << file << std::endl;
+ ret = -1;
+ goto out;
+ }
+ json_spirit::mValue result;
+ try {
+ if (!json_spirit::read(json_file_stream, result))
+ throw std::runtime_error("unparseable JSON " + file);
+ if (result.type() != json_spirit::array_type) {
+ cerr << "result is not an array_type - type=" << result.type() << std::endl;
+ throw std::runtime_error("not JSON array_type " + file);
+ }
+ do_dups_inject_from_json(fs.get(), pgid, result, debug);
+ } catch (const std::runtime_error &e) {
+ cerr << e.what() << std::endl;;
+ return -1;
+ }
} else {
ceph_assert(!"Should have already checked for valid --op");
}