1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 * Ceph - scalable distributed file system
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 * Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
9 * This is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License version 2.1, as published by the Free Software
12 * Foundation. See file COPYING.
15 #include "include/compat.h"
16 #include "include/int_types.h"
17 #include "boost/tuple/tuple.hpp"
21 #include <sys/types.h>
27 #include <sys/ioctl.h>
29 #if defined(__linux__)
31 #include <linux/falloc.h>
37 #include "include/linux_fiemap.h"
39 #include "chain_xattr.h"
41 #if defined(__APPLE__) || defined(__FreeBSD__)
42 #include <sys/param.h>
43 #include <sys/mount.h>
50 #include "FileStore.h"
51 #include "GenericFileStoreBackend.h"
52 #include "BtrfsFileStoreBackend.h"
53 #include "XfsFileStoreBackend.h"
54 #include "ZFSFileStoreBackend.h"
55 #include "common/BackTrace.h"
56 #include "include/types.h"
57 #include "FileJournal.h"
59 #include "osd/osd_types.h"
60 #include "include/color.h"
61 #include "include/buffer.h"
63 #include "common/Timer.h"
64 #include "common/debug.h"
65 #include "common/errno.h"
66 #include "common/run_cmd.h"
67 #include "common/safe_io.h"
68 #include "common/perf_counters.h"
69 #include "common/sync_filesystem.h"
70 #include "common/fd.h"
71 #include "HashIndex.h"
72 #include "DBObjectMap.h"
73 #include "kv/KeyValueDB.h"
75 #include "common/ceph_crypto.h"
76 using ceph::crypto::SHA1
;
78 #include "include/ceph_assert.h"
80 #include "common/config.h"
81 #include "common/blkdev.h"
84 #define TRACEPOINT_DEFINE
85 #define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
86 #include "tracing/objectstore.h"
87 #undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
88 #undef TRACEPOINT_DEFINE
90 #define tracepoint(...)
93 #define dout_context cct
94 #define dout_subsys ceph_subsys_filestore
96 #define dout_prefix *_dout << "filestore(" << basedir << ") "
98 #define COMMIT_SNAP_ITEM "snap_%llu"
99 #define CLUSTER_SNAP_ITEM "clustersnap_%s"
101 #define REPLAY_GUARD_XATTR "user.cephos.seq"
102 #define GLOBAL_REPLAY_GUARD_XATTR "user.cephos.gseq"
104 // XATTR_SPILL_OUT_NAME as a xattr is used to maintain that indicates whether
105 // xattrs spill over into DBObjectMap, if XATTR_SPILL_OUT_NAME exists in file
106 // xattrs and the value is "no", it indicates no xattrs in DBObjectMap
107 #define XATTR_SPILL_OUT_NAME "user.cephos.spill_out"
108 #define XATTR_NO_SPILL_OUT "0"
109 #define XATTR_SPILL_OUT "1"
110 #define __FUNC__ __func__ << "(" << __LINE__ << ")"
112 //Initial features in new superblock.
113 static CompatSet
get_fs_initial_compat_set() {
114 CompatSet::FeatureSet ceph_osd_feature_compat
;
115 CompatSet::FeatureSet ceph_osd_feature_ro_compat
;
116 CompatSet::FeatureSet ceph_osd_feature_incompat
;
117 return CompatSet(ceph_osd_feature_compat
, ceph_osd_feature_ro_compat
,
118 ceph_osd_feature_incompat
);
121 //Features are added here that this FileStore supports.
122 static CompatSet
get_fs_supported_compat_set() {
123 CompatSet compat
= get_fs_initial_compat_set();
124 //Any features here can be set in code, but not in initial superblock
125 compat
.incompat
.insert(CEPH_FS_FEATURE_INCOMPAT_SHARDS
);
129 int FileStore::validate_hobject_key(const hobject_t
&obj
) const
131 unsigned len
= LFNIndex::get_max_escaped_name_len(obj
);
132 return len
> m_filestore_max_xattr_value_size
? -ENAMETOOLONG
: 0;
135 int FileStore::get_block_device_fsid(CephContext
* cct
, const string
& path
,
138 // make sure we don't try to use aio or direct_io (and get annoying
139 // error messages from failing to do so); performance implications
140 // should be irrelevant for this use
141 FileJournal
j(cct
, *fsid
, 0, 0, path
.c_str(), false, false);
142 return j
.peek_fsid(*fsid
);
145 void FileStore::FSPerfTracker::update_from_perfcounters(
146 PerfCounters
&logger
)
148 os_commit_latency_ns
.consume_next(
150 l_filestore_journal_latency
));
151 os_apply_latency_ns
.consume_next(
153 l_filestore_apply_latency
));
157 ostream
& operator<<(ostream
& out
, const FileStore::OpSequencer
& s
)
159 return out
<< "osr(" << s
.cid
<< ")";
162 int FileStore::get_cdir(const coll_t
& cid
, char *s
, int len
)
164 const string
&cid_str(cid
.to_str());
165 return snprintf(s
, len
, "%s/current/%s", basedir
.c_str(), cid_str
.c_str());
168 void FileStore::handle_eio()
170 // don't try to map this back to an offset; too hard since there is
171 // a file system in between. we also don't really know whether this
172 // was a read or a write, since we have so many layers beneath us.
174 note_io_error_event(devname
.c_str(), basedir
.c_str(), -EIO
, 0, 0, 0);
175 ceph_abort_msg("unexpected eio error");
178 int FileStore::get_index(const coll_t
& cid
, Index
*index
)
180 int r
= index_manager
.get_index(cid
, basedir
, index
);
181 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
185 int FileStore::init_index(const coll_t
& cid
)
188 get_cdir(cid
, path
, sizeof(path
));
189 int r
= index_manager
.init_index(cid
, path
, target_version
);
190 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
194 int FileStore::lfn_find(const ghobject_t
& oid
, const Index
& index
, IndexedPath
*path
)
200 ceph_assert(index
.index
);
201 r
= (index
.index
)->lookup(oid
, path
, &exist
);
203 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
211 int FileStore::lfn_truncate(const coll_t
& cid
, const ghobject_t
& oid
, off_t length
)
214 int r
= lfn_open(cid
, oid
, false, &fd
);
217 r
= ::ftruncate(**fd
, length
);
220 if (r
>= 0 && m_filestore_sloppy_crc
) {
221 int rc
= backend
->_crc_update_truncate(**fd
, length
);
222 ceph_assert(rc
>= 0);
225 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
229 int FileStore::lfn_stat(const coll_t
& cid
, const ghobject_t
& oid
, struct stat
*buf
)
233 int r
= get_index(cid
, &index
);
237 ceph_assert(index
.index
);
238 std::shared_lock l
{(index
.index
)->access_lock
};
240 r
= lfn_find(oid
, index
, &path
);
243 r
= ::stat(path
->path(), buf
);
249 int FileStore::lfn_open(const coll_t
& cid
,
250 const ghobject_t
& oid
,
257 bool need_lock
= true;
262 if (cct
->_conf
->filestore_odsync_write
) {
270 if (!((*index
).index
)) {
271 r
= get_index(cid
, index
);
273 dout(10) << __FUNC__
<< ": could not get index r = " << r
<< dendl
;
281 ceph_assert((*index
).index
);
283 ((*index
).index
)->access_lock
.lock();
286 *outfd
= fdcache
.lookup(oid
);
289 ((*index
).index
)->access_lock
.unlock();
297 IndexedPath
*path
= &path2
;
299 r
= (*index
)->lookup(oid
, path
, &exist
);
301 derr
<< "could not find " << oid
<< " in index: "
302 << cpp_strerror(-r
) << dendl
;
306 r
= ::open((*path
)->path(), flags
|O_CLOEXEC
, 0644);
309 dout(10) << "error opening file " << (*path
)->path() << " with flags="
310 << flags
<< ": " << cpp_strerror(-r
) << dendl
;
314 if (create
&& (!exist
)) {
315 r
= (*index
)->created(oid
, (*path
)->path());
317 VOID_TEMP_FAILURE_RETRY(::close(fd
));
318 derr
<< "error creating " << oid
<< " (" << (*path
)->path()
319 << ") in index: " << cpp_strerror(-r
) << dendl
;
322 r
= chain_fsetxattr
<true, true>(
323 fd
, XATTR_SPILL_OUT_NAME
,
324 XATTR_NO_SPILL_OUT
, sizeof(XATTR_NO_SPILL_OUT
));
326 VOID_TEMP_FAILURE_RETRY(::close(fd
));
327 derr
<< "error setting spillout xattr for oid " << oid
<< " (" << (*path
)->path()
328 << "):" << cpp_strerror(-r
) << dendl
;
335 *outfd
= fdcache
.add(oid
, fd
, &existed
);
337 TEMP_FAILURE_RETRY(::close(fd
));
340 *outfd
= std::make_shared
<FDCache::FD
>(fd
);
344 ((*index
).index
)->access_lock
.unlock();
352 ((*index
).index
)->access_lock
.unlock();
355 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
359 void FileStore::lfn_close(FDRef fd
)
363 int FileStore::lfn_link(const coll_t
& c
, const coll_t
& newcid
, const ghobject_t
& o
, const ghobject_t
& newoid
)
365 Index index_new
, index_old
;
366 IndexedPath path_new
, path_old
;
369 bool index_same
= false;
371 r
= get_index(newcid
, &index_new
);
374 r
= get_index(c
, &index_old
);
377 } else if (c
== newcid
) {
378 r
= get_index(c
, &index_old
);
381 index_new
= index_old
;
384 r
= get_index(c
, &index_old
);
387 r
= get_index(newcid
, &index_new
);
392 ceph_assert(index_old
.index
);
393 ceph_assert(index_new
.index
);
397 std::shared_lock l1
{(index_old
.index
)->access_lock
};
399 r
= index_old
->lookup(o
, &path_old
, &exist
);
401 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
407 std::unique_lock l2
{(index_new
.index
)->access_lock
};
409 r
= index_new
->lookup(newoid
, &path_new
, &exist
);
411 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
417 dout(25) << __FUNC__
<< ": path_old: " << path_old
<< dendl
;
418 dout(25) << __FUNC__
<< ": path_new: " << path_new
<< dendl
;
419 r
= ::link(path_old
->path(), path_new
->path());
423 r
= index_new
->created(newoid
, path_new
->path());
425 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
429 std::unique_lock l1
{(index_old
.index
)->access_lock
};
431 r
= index_old
->lookup(o
, &path_old
, &exist
);
433 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
439 r
= index_new
->lookup(newoid
, &path_new
, &exist
);
441 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
447 dout(25) << __FUNC__
<< ": path_old: " << path_old
<< dendl
;
448 dout(25) << __FUNC__
<< ": path_new: " << path_new
<< dendl
;
449 r
= ::link(path_old
->path(), path_new
->path());
453 // make sure old fd for unlinked/overwritten file is gone
454 fdcache
.clear(newoid
);
456 r
= index_new
->created(newoid
, path_new
->path());
458 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
465 int FileStore::lfn_unlink(const coll_t
& cid
, const ghobject_t
& o
,
466 const SequencerPosition
&spos
,
467 bool force_clear_omap
)
470 int r
= get_index(cid
, &index
);
472 dout(25) << __FUNC__
<< ": get_index failed " << cpp_strerror(r
) << dendl
;
476 ceph_assert(index
.index
);
477 std::unique_lock l
{(index
.index
)->access_lock
};
482 r
= index
->lookup(o
, &path
, &hardlink
);
484 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
488 if (!force_clear_omap
) {
489 if (hardlink
== 0 || hardlink
== 1) {
490 force_clear_omap
= true;
493 if (force_clear_omap
) {
494 dout(20) << __FUNC__
<< ": clearing omap on " << o
495 << " in cid " << cid
<< dendl
;
496 r
= object_map
->clear(o
, &spos
);
497 if (r
< 0 && r
!= -ENOENT
) {
498 dout(25) << __FUNC__
<< ": omap clear failed " << cpp_strerror(r
) << dendl
;
499 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
502 if (cct
->_conf
->filestore_debug_inject_read_err
) {
503 debug_obj_on_delete(o
);
505 if (!m_disable_wbthrottle
) {
506 wbthrottle
.clear_object(o
); // should be only non-cache ref
510 /* Ensure that replay of this op doesn't result in the object_map
513 if (!backend
->can_checkpoint())
514 object_map
->sync(&o
, &spos
);
517 if (!m_disable_wbthrottle
) {
518 wbthrottle
.clear_object(o
); // should be only non-cache ref
523 r
= index
->unlink(o
);
525 dout(25) << __FUNC__
<< ": index unlink failed " << cpp_strerror(r
) << dendl
;
531 FileStore::FileStore(CephContext
* cct
, const std::string
&base
,
532 const std::string
&jdev
, osflagbits_t flags
,
533 const char *name
, bool do_update
) :
534 JournalingObjectStore(cct
, base
),
536 basedir(base
), journalpath(jdev
),
537 generic_flags(flags
),
539 fsid_fd(-1), op_fd(-1),
540 basedir_fd(-1), current_fd(-1),
542 index_manager(cct
, do_update
),
544 timer(cct
, sync_entry_timeo_lock
),
545 stop(false), sync_thread(this),
549 m_disable_wbthrottle(cct
->_conf
->filestore_odsync_write
||
550 !cct
->_conf
->filestore_wbthrottle_enable
),
551 throttle_ops(cct
, "filestore_ops", cct
->_conf
->filestore_caller_concurrency
),
552 throttle_bytes(cct
, "filestore_bytes", cct
->_conf
->filestore_caller_concurrency
),
553 m_ondisk_finisher_num(cct
->_conf
->filestore_ondisk_finisher_threads
),
554 m_apply_finisher_num(cct
->_conf
->filestore_apply_finisher_threads
),
555 op_tp(cct
, "FileStore::op_tp", "tp_fstore_op", cct
->_conf
->filestore_op_threads
, "filestore_op_threads"),
556 op_wq(this, cct
->_conf
->filestore_op_thread_timeout
,
557 cct
->_conf
->filestore_op_thread_suicide_timeout
, &op_tp
),
559 trace_endpoint("0.0.0.0", 0, "FileStore"),
560 m_filestore_commit_timeout(cct
->_conf
->filestore_commit_timeout
),
561 m_filestore_journal_parallel(cct
->_conf
->filestore_journal_parallel
),
562 m_filestore_journal_trailing(cct
->_conf
->filestore_journal_trailing
),
563 m_filestore_journal_writeahead(cct
->_conf
->filestore_journal_writeahead
),
564 m_filestore_fiemap_threshold(cct
->_conf
->filestore_fiemap_threshold
),
565 m_filestore_max_sync_interval(cct
->_conf
->filestore_max_sync_interval
),
566 m_filestore_min_sync_interval(cct
->_conf
->filestore_min_sync_interval
),
567 m_filestore_fail_eio(cct
->_conf
->filestore_fail_eio
),
568 m_filestore_fadvise(cct
->_conf
->filestore_fadvise
),
569 do_update(do_update
),
570 m_journal_dio(cct
->_conf
->journal_dio
),
571 m_journal_aio(cct
->_conf
->journal_aio
),
572 m_journal_force_aio(cct
->_conf
->journal_force_aio
),
573 m_osd_rollback_to_cluster_snap(cct
->_conf
->osd_rollback_to_cluster_snap
),
574 m_osd_use_stale_snap(cct
->_conf
->osd_use_stale_snap
),
575 m_filestore_do_dump(false),
576 m_filestore_dump_fmt(true),
577 m_filestore_sloppy_crc(cct
->_conf
->filestore_sloppy_crc
),
578 m_filestore_sloppy_crc_block_size(cct
->_conf
->filestore_sloppy_crc_block_size
),
579 m_filestore_max_alloc_hint_size(cct
->_conf
->filestore_max_alloc_hint_size
),
581 m_filestore_max_inline_xattr_size(0),
582 m_filestore_max_inline_xattrs(0),
583 m_filestore_max_xattr_value_size(0)
585 m_filestore_kill_at
= cct
->_conf
->filestore_kill_at
;
586 for (int i
= 0; i
< m_ondisk_finisher_num
; ++i
) {
588 oss
<< "filestore-ondisk-" << i
;
589 Finisher
*f
= new Finisher(cct
, oss
.str(), "fn_odsk_fstore");
590 ondisk_finishers
.push_back(f
);
592 for (int i
= 0; i
< m_apply_finisher_num
; ++i
) {
594 oss
<< "filestore-apply-" << i
;
595 Finisher
*f
= new Finisher(cct
, oss
.str(), "fn_appl_fstore");
596 apply_finishers
.push_back(f
);
600 oss
<< basedir
<< "/current";
601 current_fn
= oss
.str();
604 sss
<< basedir
<< "/current/commit_op_seq";
605 current_op_seq_fn
= sss
.str();
608 if (cct
->_conf
->filestore_omap_backend_path
!= "") {
609 omap_dir
= cct
->_conf
->filestore_omap_backend_path
;
611 omss
<< basedir
<< "/current/omap";
612 omap_dir
= omss
.str();
616 PerfCountersBuilder
plb(cct
, internal_name
, l_filestore_first
, l_filestore_last
);
618 plb
.add_u64(l_filestore_journal_queue_ops
, "journal_queue_ops", "Operations in journal queue");
619 plb
.add_u64(l_filestore_journal_ops
, "journal_ops", "Active journal entries to be applied");
620 plb
.add_u64(l_filestore_journal_queue_bytes
, "journal_queue_bytes", "Size of journal queue");
621 plb
.add_u64(l_filestore_journal_bytes
, "journal_bytes", "Active journal operation size to be applied");
622 plb
.add_time_avg(l_filestore_journal_latency
, "journal_latency", "Average journal queue completing latency",
623 NULL
, PerfCountersBuilder::PRIO_USEFUL
);
624 plb
.add_u64_counter(l_filestore_journal_wr
, "journal_wr", "Journal write IOs");
625 plb
.add_u64_avg(l_filestore_journal_wr_bytes
, "journal_wr_bytes", "Journal data written");
626 plb
.add_u64(l_filestore_op_queue_max_ops
, "op_queue_max_ops", "Max operations in writing to FS queue");
627 plb
.add_u64(l_filestore_op_queue_ops
, "op_queue_ops", "Operations in writing to FS queue");
628 plb
.add_u64_counter(l_filestore_ops
, "ops", "Operations written to store");
629 plb
.add_u64(l_filestore_op_queue_max_bytes
, "op_queue_max_bytes", "Max data in writing to FS queue");
630 plb
.add_u64(l_filestore_op_queue_bytes
, "op_queue_bytes", "Size of writing to FS queue");
631 plb
.add_u64_counter(l_filestore_bytes
, "bytes", "Data written to store");
632 plb
.add_time_avg(l_filestore_apply_latency
, "apply_latency", "Apply latency");
633 plb
.add_u64(l_filestore_committing
, "committing", "Is currently committing");
635 plb
.add_u64_counter(l_filestore_commitcycle
, "commitcycle", "Commit cycles");
636 plb
.add_time_avg(l_filestore_commitcycle_interval
, "commitcycle_interval", "Average interval between commits");
637 plb
.add_time_avg(l_filestore_commitcycle_latency
, "commitcycle_latency", "Average latency of commit");
638 plb
.add_u64_counter(l_filestore_journal_full
, "journal_full", "Journal writes while full");
639 plb
.add_time_avg(l_filestore_queue_transaction_latency_avg
, "queue_transaction_latency_avg",
640 "Store operation queue latency", NULL
, PerfCountersBuilder::PRIO_USEFUL
);
641 plb
.add_time(l_filestore_sync_pause_max_lat
, "sync_pause_max_latency", "Max latency of op_wq pause before syncfs");
643 logger
= plb
.create_perf_counters();
645 cct
->get_perfcounters_collection()->add(logger
);
646 cct
->_conf
.add_observer(this);
648 superblock
.compat_features
= get_fs_initial_compat_set();
651 FileStore::~FileStore()
653 for (vector
<Finisher
*>::iterator it
= ondisk_finishers
.begin(); it
!= ondisk_finishers
.end(); ++it
) {
657 for (vector
<Finisher
*>::iterator it
= apply_finishers
.begin(); it
!= apply_finishers
.end(); ++it
) {
661 cct
->_conf
.remove_observer(this);
662 cct
->get_perfcounters_collection()->remove(logger
);
665 journal
->logger
= nullptr;
669 if (m_filestore_do_dump
) {
674 static void get_attrname(const char *name
, char *buf
, int len
)
676 snprintf(buf
, len
, "user.ceph.%s", name
);
679 bool parse_attrname(char **name
)
681 if (strncmp(*name
, "user.ceph.", 10) == 0) {
688 void FileStore::collect_metadata(map
<string
,string
> *pm
)
690 char partition_path
[PATH_MAX
];
691 char dev_node
[PATH_MAX
];
693 (*pm
)["filestore_backend"] = backend
->get_name();
695 ss
<< "0x" << std::hex
<< m_fs_type
<< std::dec
;
696 (*pm
)["filestore_f_type"] = ss
.str();
698 if (cct
->_conf
->filestore_collect_device_partition_information
) {
700 BlkDev
blkdev(fsid_fd
);
701 if (rc
= blkdev
.partition(partition_path
, PATH_MAX
); rc
) {
702 (*pm
)["backend_filestore_partition_path"] = "unknown";
704 (*pm
)["backend_filestore_partition_path"] = string(partition_path
);
706 if (rc
= blkdev
.wholedisk(dev_node
, PATH_MAX
); rc
) {
707 (*pm
)["backend_filestore_dev_node"] = "unknown";
709 (*pm
)["backend_filestore_dev_node"] = string(dev_node
);
712 if (rc
== 0 && vdo_fd
>= 0) {
713 (*pm
)["vdo"] = "true";
714 (*pm
)["vdo_physical_size"] =
715 stringify(4096 * get_vdo_stat(vdo_fd
, "physical_blocks"));
718 journal
->collect_metadata(pm
);
723 int FileStore::get_devices(set
<string
> *ls
)
726 BlkDev
blkdev(fsid_fd
);
727 if (int rc
= blkdev
.wholedisk(&dev_node
); rc
) {
730 get_raw_devices(dev_node
, ls
);
732 journal
->get_devices(ls
);
737 int FileStore::statfs(struct store_statfs_t
*buf0
, osd_alert_list_t
* alerts
)
742 alerts
->clear(); // returns nothing for now
744 if (::statfs(basedir
.c_str(), &buf
) < 0) {
746 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
747 ceph_assert(r
!= -ENOENT
);
751 uint64_t bfree
= buf
.f_bavail
* buf
.f_bsize
;
753 // assume all of leveldb/rocksdb is omap.
755 map
<string
,uint64_t> kv_usage
;
756 buf0
->omap_allocated
+= object_map
->get_db()->get_estimated_size(kv_usage
);
759 uint64_t thin_total
, thin_avail
;
760 if (get_vdo_utilization(vdo_fd
, &thin_total
, &thin_avail
)) {
761 buf0
->total
= thin_total
;
762 bfree
= std::min(bfree
, thin_avail
);
763 buf0
->allocated
= thin_total
- thin_avail
;
764 buf0
->data_stored
= bfree
;
766 buf0
->total
= buf
.f_blocks
* buf
.f_bsize
;
767 buf0
->allocated
= bfree
;
768 buf0
->data_stored
= bfree
;
770 buf0
->available
= bfree
;
772 // FIXME: we don't know how to populate buf->internal_metadata; XFS doesn't
773 // tell us what its internal overhead is.
775 // Adjust for writes pending in the journal
777 uint64_t estimate
= journal
->get_journal_size_estimate();
778 buf0
->internally_reserved
= estimate
;
779 if (buf0
->available
> estimate
)
780 buf0
->available
-= estimate
;
788 int FileStore::pool_statfs(uint64_t pool_id
, struct store_statfs_t
*buf
,
794 void FileStore::new_journal()
796 if (journalpath
.length()) {
797 dout(10) << "open_journal at " << journalpath
<< dendl
;
798 journal
= new FileJournal(cct
, fsid
, &finisher
, &sync_cond
,
800 m_journal_dio
, m_journal_aio
,
801 m_journal_force_aio
);
803 journal
->logger
= logger
;
808 int FileStore::dump_journal(ostream
& out
)
812 if (!journalpath
.length())
815 FileJournal
*journal
= new FileJournal(cct
, fsid
, &finisher
, &sync_cond
, journalpath
.c_str(), m_journal_dio
);
816 r
= journal
->dump(out
);
822 FileStoreBackend
*FileStoreBackend::create(unsigned long f_type
, FileStore
*fs
)
825 #if defined(__linux__)
826 case BTRFS_SUPER_MAGIC
:
827 return new BtrfsFileStoreBackend(fs
);
829 case XFS_SUPER_MAGIC
:
830 return new XfsFileStoreBackend(fs
);
834 case ZFS_SUPER_MAGIC
:
835 return new ZFSFileStoreBackend(fs
);
838 return new GenericFileStoreBackend(fs
);
842 void FileStore::create_backend(unsigned long f_type
)
846 ceph_assert(!backend
);
847 backend
= FileStoreBackend::create(f_type
, this);
849 dout(0) << "backend " << backend
->get_name()
850 << " (magic 0x" << std::hex
<< f_type
<< std::dec
<< ")"
854 #if defined(__linux__)
855 case BTRFS_SUPER_MAGIC
:
856 if (!m_disable_wbthrottle
){
857 wbthrottle
.set_fs(WBThrottle::BTRFS
);
861 case XFS_SUPER_MAGIC
:
862 // wbthrottle is constructed with fs(WBThrottle::XFS)
867 set_xattr_limits_via_conf();
870 int FileStore::mkfs()
873 char fsid_fn
[PATH_MAX
];
876 uuid_d old_omap_fsid
;
878 dout(1) << "mkfs in " << basedir
<< dendl
;
879 basedir_fd
= ::open(basedir
.c_str(), O_RDONLY
|O_CLOEXEC
);
880 if (basedir_fd
< 0) {
882 derr
<< __FUNC__
<< ": failed to open base dir " << basedir
<< ": " << cpp_strerror(ret
) << dendl
;
887 snprintf(fsid_fn
, sizeof(fsid_fn
), "%s/fsid", basedir
.c_str());
888 fsid_fd
= ::open(fsid_fn
, O_RDWR
|O_CREAT
|O_CLOEXEC
, 0644);
891 derr
<< __FUNC__
<< ": failed to open " << fsid_fn
<< ": " << cpp_strerror(ret
) << dendl
;
892 goto close_basedir_fd
;
895 if (lock_fsid() < 0) {
900 if (read_fsid(fsid_fd
, &old_fsid
) < 0 || old_fsid
.is_zero()) {
901 if (fsid
.is_zero()) {
902 fsid
.generate_random();
903 dout(1) << __FUNC__
<< ": generated fsid " << fsid
<< dendl
;
905 dout(1) << __FUNC__
<< ": using provided fsid " << fsid
<< dendl
;
908 fsid
.print(fsid_str
);
909 strcat(fsid_str
, "\n");
910 ret
= ::ftruncate(fsid_fd
, 0);
913 derr
<< __FUNC__
<< ": failed to truncate fsid: "
914 << cpp_strerror(ret
) << dendl
;
917 ret
= safe_write(fsid_fd
, fsid_str
, strlen(fsid_str
));
919 derr
<< __FUNC__
<< ": failed to write fsid: "
920 << cpp_strerror(ret
) << dendl
;
923 if (::fsync(fsid_fd
) < 0) {
925 derr
<< __FUNC__
<< ": close failed: can't write fsid: "
926 << cpp_strerror(ret
) << dendl
;
929 dout(10) << __FUNC__
<< ": fsid is " << fsid
<< dendl
;
931 if (!fsid
.is_zero() && fsid
!= old_fsid
) {
932 derr
<< __FUNC__
<< ": on-disk fsid " << old_fsid
<< " != provided " << fsid
<< dendl
;
937 dout(1) << __FUNC__
<< ": fsid is already set to " << fsid
<< dendl
;
941 ret
= write_version_stamp();
943 derr
<< __FUNC__
<< ": write_version_stamp() failed: "
944 << cpp_strerror(ret
) << dendl
;
949 superblock
.omap_backend
= cct
->_conf
->filestore_omap_backend
;
950 ret
= write_superblock();
952 derr
<< __FUNC__
<< ": write_superblock() failed: "
953 << cpp_strerror(ret
) << dendl
;
957 struct statfs basefs
;
958 ret
= ::fstatfs(basedir_fd
, &basefs
);
961 derr
<< __FUNC__
<< ": cannot fstatfs basedir "
962 << cpp_strerror(ret
) << dendl
;
966 #if defined(__linux__)
967 if (basefs
.f_type
== BTRFS_SUPER_MAGIC
&&
968 !g_ceph_context
->check_experimental_feature_enabled("btrfs")) {
969 derr
<< __FUNC__
<< ": deprecated btrfs support is not enabled" << dendl
;
974 create_backend(basefs
.f_type
);
976 ret
= backend
->create_current();
978 derr
<< __FUNC__
<< ": failed to create current/ " << cpp_strerror(ret
) << dendl
;
982 // write initial op_seq
984 uint64_t initial_seq
= 0;
985 int fd
= read_op_seq(&initial_seq
);
988 derr
<< __FUNC__
<< ": failed to create " << current_op_seq_fn
<< ": "
989 << cpp_strerror(ret
) << dendl
;
992 if (initial_seq
== 0) {
993 ret
= write_op_seq(fd
, 1);
995 VOID_TEMP_FAILURE_RETRY(::close(fd
));
996 derr
<< __FUNC__
<< ": failed to write to " << current_op_seq_fn
<< ": "
997 << cpp_strerror(ret
) << dendl
;
1001 if (backend
->can_checkpoint()) {
1002 // create snap_1 too
1003 current_fd
= ::open(current_fn
.c_str(), O_RDONLY
|O_CLOEXEC
);
1004 ceph_assert(current_fd
>= 0);
1006 snprintf(s
, sizeof(s
), COMMIT_SNAP_ITEM
, 1ull);
1007 ret
= backend
->create_checkpoint(s
, nullptr);
1008 VOID_TEMP_FAILURE_RETRY(::close(current_fd
));
1009 if (ret
< 0 && ret
!= -EEXIST
) {
1010 VOID_TEMP_FAILURE_RETRY(::close(fd
));
1011 derr
<< __FUNC__
<< ": failed to create snap_1: " << cpp_strerror(ret
) << dendl
;
1016 VOID_TEMP_FAILURE_RETRY(::close(fd
));
1018 ret
= KeyValueDB::test_init(superblock
.omap_backend
, omap_dir
);
1020 derr
<< __FUNC__
<< ": failed to create " << cct
->_conf
->filestore_omap_backend
<< dendl
;
1023 // create fsid under omap
1026 char omap_fsid_fn
[PATH_MAX
];
1027 snprintf(omap_fsid_fn
, sizeof(omap_fsid_fn
), "%s/osd_uuid", omap_dir
.c_str());
1028 omap_fsid_fd
= ::open(omap_fsid_fn
, O_RDWR
|O_CREAT
|O_CLOEXEC
, 0644);
1029 if (omap_fsid_fd
< 0) {
1031 derr
<< __FUNC__
<< ": failed to open " << omap_fsid_fn
<< ": " << cpp_strerror(ret
) << dendl
;
1035 if (read_fsid(omap_fsid_fd
, &old_omap_fsid
) < 0 || old_omap_fsid
.is_zero()) {
1036 ceph_assert(!fsid
.is_zero());
1037 fsid
.print(fsid_str
);
1038 strcat(fsid_str
, "\n");
1039 ret
= ::ftruncate(omap_fsid_fd
, 0);
1042 derr
<< __FUNC__
<< ": failed to truncate fsid: "
1043 << cpp_strerror(ret
) << dendl
;
1044 goto close_omap_fsid_fd
;
1046 ret
= safe_write(omap_fsid_fd
, fsid_str
, strlen(fsid_str
));
1048 derr
<< __FUNC__
<< ": failed to write fsid: "
1049 << cpp_strerror(ret
) << dendl
;
1050 goto close_omap_fsid_fd
;
1052 dout(10) << __FUNC__
<< ": write success, fsid:" << fsid_str
<< ", ret:" << ret
<< dendl
;
1053 if (::fsync(omap_fsid_fd
) < 0) {
1055 derr
<< __FUNC__
<< ": close failed: can't write fsid: "
1056 << cpp_strerror(ret
) << dendl
;
1057 goto close_omap_fsid_fd
;
1059 dout(10) << "mkfs omap fsid is " << fsid
<< dendl
;
1061 if (fsid
!= old_omap_fsid
) {
1062 derr
<< __FUNC__
<< ": " << omap_fsid_fn
1063 << " has existed omap fsid " << old_omap_fsid
1064 << " != expected osd fsid " << fsid
1067 goto close_omap_fsid_fd
;
1069 dout(1) << __FUNC__
<< ": omap fsid is already set to " << fsid
<< dendl
;
1072 dout(1) << cct
->_conf
->filestore_omap_backend
<< " db exists/created" << dendl
;
1077 goto close_omap_fsid_fd
;
1079 ret
= write_meta("type", "filestore");
1081 goto close_omap_fsid_fd
;
1083 dout(1) << "mkfs done in " << basedir
<< dendl
;
1087 VOID_TEMP_FAILURE_RETRY(::close(omap_fsid_fd
));
1089 VOID_TEMP_FAILURE_RETRY(::close(fsid_fd
));
1092 VOID_TEMP_FAILURE_RETRY(::close(basedir_fd
));
1098 int FileStore::mkjournal()
1103 snprintf(fn
, sizeof(fn
), "%s/fsid", basedir
.c_str());
1104 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
, 0644);
1107 derr
<< __FUNC__
<< ": open error: " << cpp_strerror(err
) << dendl
;
1110 ret
= read_fsid(fd
, &fsid
);
1112 derr
<< __FUNC__
<< ": read error: " << cpp_strerror(ret
) << dendl
;
1113 VOID_TEMP_FAILURE_RETRY(::close(fd
));
1116 VOID_TEMP_FAILURE_RETRY(::close(fd
));
1122 ret
= journal
->check();
1124 ret
= journal
->create();
1126 derr
<< __FUNC__
<< ": error creating journal on " << journalpath
1127 << ": " << cpp_strerror(ret
) << dendl
;
1129 dout(0) << __FUNC__
<< ": created journal on " << journalpath
<< dendl
;
1137 int FileStore::read_fsid(int fd
, uuid_d
*uuid
)
1140 memset(fsid_str
, 0, sizeof(fsid_str
));
1141 int ret
= safe_read(fd
, fsid_str
, sizeof(fsid_str
));
1145 // old 64-bit fsid... mirror it.
1146 *(uint64_t*)&uuid
->bytes()[0] = *(uint64_t*)fsid_str
;
1147 *(uint64_t*)&uuid
->bytes()[8] = *(uint64_t*)fsid_str
;
1155 if (!uuid
->parse(fsid_str
))
1160 int FileStore::lock_fsid()
1163 memset(&l
, 0, sizeof(l
));
1165 l
.l_whence
= SEEK_SET
;
1168 int r
= ::fcntl(fsid_fd
, F_SETLK
, &l
);
1171 dout(0) << __FUNC__
<< ": failed to lock " << basedir
<< "/fsid, is another ceph-osd still running? "
1172 << cpp_strerror(err
) << dendl
;
1178 bool FileStore::test_mount_in_use()
1180 dout(5) << __FUNC__
<< ": basedir " << basedir
<< " journal " << journalpath
<< dendl
;
1182 snprintf(fn
, sizeof(fn
), "%s/fsid", basedir
.c_str());
1184 // verify fs isn't in use
1186 fsid_fd
= ::open(fn
, O_RDWR
|O_CLOEXEC
, 0644);
1188 return 0; // no fsid, ok.
1189 bool inuse
= lock_fsid() < 0;
1190 VOID_TEMP_FAILURE_RETRY(::close(fsid_fd
));
1195 bool FileStore::is_rotational()
1199 rotational
= backend
->is_rotational();
1201 int fd
= ::open(basedir
.c_str(), O_RDONLY
|O_CLOEXEC
);
1205 int r
= ::fstatfs(fd
, &st
);
1210 create_backend(st
.f_type
);
1211 rotational
= backend
->is_rotational();
1215 dout(10) << __func__
<< " " << (int)rotational
<< dendl
;
1219 bool FileStore::is_journal_rotational()
1221 bool journal_rotational
;
1223 journal_rotational
= backend
->is_journal_rotational();
1225 int fd
= ::open(journalpath
.c_str(), O_RDONLY
|O_CLOEXEC
);
1229 int r
= ::fstatfs(fd
, &st
);
1234 create_backend(st
.f_type
);
1235 journal_rotational
= backend
->is_journal_rotational();
1239 dout(10) << __func__
<< " " << (int)journal_rotational
<< dendl
;
1240 return journal_rotational
;
1243 int FileStore::_detect_fs()
1246 int r
= ::fstatfs(basedir_fd
, &st
);
1250 blk_size
= st
.f_bsize
;
1252 #if defined(__linux__)
1253 if (st
.f_type
== BTRFS_SUPER_MAGIC
&&
1254 !g_ceph_context
->check_experimental_feature_enabled("btrfs")) {
1255 derr
<<__FUNC__
<< ": deprecated btrfs support is not enabled" << dendl
;
1260 create_backend(st
.f_type
);
1262 r
= backend
->detect_features();
1264 derr
<< __FUNC__
<< ": detect_features error: " << cpp_strerror(r
) << dendl
;
1270 char dev_node
[PATH_MAX
];
1271 if (int rc
= BlkDev
{fsid_fd
}.wholedisk(dev_node
, PATH_MAX
); rc
== 0) {
1272 vdo_fd
= get_vdo_stats_handle(dev_node
, &vdo_name
);
1274 dout(0) << __func__
<< " VDO volume " << vdo_name
<< " for " << dev_node
1284 snprintf(fn
, sizeof(fn
), "%s/xattr_test", basedir
.c_str());
1285 int tmpfd
= ::open(fn
, O_CREAT
|O_WRONLY
|O_TRUNC
|O_CLOEXEC
, 0700);
1288 derr
<< __FUNC__
<< ": unable to create " << fn
<< ": " << cpp_strerror(ret
) << dendl
;
1292 int ret
= chain_fsetxattr(tmpfd
, "user.test", &x
, sizeof(x
));
1294 ret
= chain_fgetxattr(tmpfd
, "user.test", &y
, sizeof(y
));
1295 if ((ret
< 0) || (x
!= y
)) {
1296 derr
<< "Extended attributes don't appear to work. ";
1298 *_dout
<< "Got error " + cpp_strerror(ret
) + ". ";
1299 *_dout
<< "If you are using ext3 or ext4, be sure to mount the underlying "
1300 << "file system with the 'user_xattr' option." << dendl
;
1302 VOID_TEMP_FAILURE_RETRY(::close(tmpfd
));
1307 memset(buf
, 0, sizeof(buf
)); // shut up valgrind
1308 chain_fsetxattr(tmpfd
, "user.test", &buf
, sizeof(buf
));
1309 chain_fsetxattr(tmpfd
, "user.test2", &buf
, sizeof(buf
));
1310 chain_fsetxattr(tmpfd
, "user.test3", &buf
, sizeof(buf
));
1311 chain_fsetxattr(tmpfd
, "user.test4", &buf
, sizeof(buf
));
1312 ret
= chain_fsetxattr(tmpfd
, "user.test5", &buf
, sizeof(buf
));
1313 if (ret
== -ENOSPC
) {
1314 dout(0) << "limited size xattrs" << dendl
;
1316 chain_fremovexattr(tmpfd
, "user.test");
1317 chain_fremovexattr(tmpfd
, "user.test2");
1318 chain_fremovexattr(tmpfd
, "user.test3");
1319 chain_fremovexattr(tmpfd
, "user.test4");
1320 chain_fremovexattr(tmpfd
, "user.test5");
1323 VOID_TEMP_FAILURE_RETRY(::close(tmpfd
));
1328 int FileStore::_sanity_check_fs()
1332 if (((int)m_filestore_journal_writeahead
+
1333 (int)m_filestore_journal_parallel
+
1334 (int)m_filestore_journal_trailing
) > 1) {
1335 dout(0) << "mount ERROR: more than one of filestore journal {writeahead,parallel,trailing} enabled" << dendl
;
1337 << " ** WARNING: more than one of 'filestore journal {writeahead,parallel,trailing}'\n"
1338 << " is enabled in ceph.conf. You must choose a single journal mode."
1339 << TEXT_NORMAL
<< std::endl
;
1343 if (!backend
->can_checkpoint()) {
1344 if (!journal
|| !m_filestore_journal_writeahead
) {
1345 dout(0) << "mount WARNING: no btrfs, and no journal in writeahead mode; data may be lost" << dendl
;
1347 << " ** WARNING: no btrfs AND (no journal OR journal not in writeahead mode)\n"
1348 << " For non-btrfs volumes, a writeahead journal is required to\n"
1349 << " maintain on-disk consistency in the event of a crash. Your conf\n"
1350 << " should include something like:\n"
1351 << " osd journal = /path/to/journal_device_or_file\n"
1352 << " filestore journal writeahead = true\n"
1358 dout(0) << "mount WARNING: no journal" << dendl
;
1360 << " ** WARNING: No osd journal is configured: write latency may be high.\n"
1361 << " If you will not be using an osd journal, write latency may be\n"
1362 << " relatively high. It can be reduced somewhat by lowering\n"
1363 << " filestore_max_sync_interval, but lower values mean lower write\n"
1364 << " throughput, especially with spinning disks.\n"
1371 int FileStore::write_superblock()
1374 encode(superblock
, bl
);
1375 return safe_write_file(basedir
.c_str(), "superblock",
1376 bl
.c_str(), bl
.length(), 0600);
1379 int FileStore::read_superblock()
1381 bufferptr
bp(PATH_MAX
);
1382 int ret
= safe_read_file(basedir
.c_str(), "superblock",
1383 bp
.c_str(), bp
.length());
1385 if (ret
== -ENOENT
) {
1386 // If the file doesn't exist write initial CompatSet
1387 return write_superblock();
1393 bl
.push_back(std::move(bp
));
1394 auto i
= bl
.cbegin();
1395 decode(superblock
, i
);
1399 int FileStore::update_version_stamp()
1401 return write_version_stamp();
1404 int FileStore::version_stamp_is_valid(uint32_t *version
)
1406 bufferptr
bp(PATH_MAX
);
1407 int ret
= safe_read_file(basedir
.c_str(), "store_version",
1408 bp
.c_str(), bp
.length());
1413 bl
.push_back(std::move(bp
));
1414 auto i
= bl
.cbegin();
1415 decode(*version
, i
);
1416 dout(10) << __FUNC__
<< ": was " << *version
<< " vs target "
1417 << target_version
<< dendl
;
1418 if (*version
== target_version
)
1424 int FileStore::flush_cache(ostream
*os
)
1426 string drop_caches_file
= "/proc/sys/vm/drop_caches";
1427 int drop_caches_fd
= ::open(drop_caches_file
.c_str(), O_WRONLY
|O_CLOEXEC
), ret
= 0;
1429 size_t len
= strlen(buf
);
1431 if (drop_caches_fd
< 0) {
1433 derr
<< __FUNC__
<< ": failed to open " << drop_caches_file
<< ": " << cpp_strerror(ret
) << dendl
;
1435 *os
<< "FileStore flush_cache: failed to open " << drop_caches_file
<< ": " << cpp_strerror(ret
);
1440 if (::write(drop_caches_fd
, buf
, len
) < 0) {
1442 derr
<< __FUNC__
<< ": failed to write to " << drop_caches_file
<< ": " << cpp_strerror(ret
) << dendl
;
1444 *os
<< "FileStore flush_cache: failed to write to " << drop_caches_file
<< ": " << cpp_strerror(ret
);
1450 ::close(drop_caches_fd
);
1454 int FileStore::write_version_stamp()
1456 dout(1) << __FUNC__
<< ": " << target_version
<< dendl
;
1458 encode(target_version
, bl
);
1460 return safe_write_file(basedir
.c_str(), "store_version",
1461 bl
.c_str(), bl
.length(), 0600);
1464 int FileStore::upgrade()
1466 dout(1) << __FUNC__
<< dendl
;
1468 int r
= version_stamp_is_valid(&version
);
1471 derr
<< "The store_version file doesn't exist." << dendl
;
1480 derr
<< "ObjectStore is old at version " << version
<< ". Please upgrade to firefly v0.80.x, convert your store, and then upgrade." << dendl
;
1484 // nothing necessary in FileStore for v3 -> v4 upgrade; we just need to
1485 // open up DBObjectMap with the do_upgrade flag, which we already did.
1486 update_version_stamp();
1490 int FileStore::read_op_seq(uint64_t *seq
)
1492 int op_fd
= ::open(current_op_seq_fn
.c_str(), O_CREAT
|O_RDWR
|O_CLOEXEC
, 0644);
1495 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
1499 memset(s
, 0, sizeof(s
));
1500 int ret
= safe_read(op_fd
, s
, sizeof(s
) - 1);
1502 derr
<< __FUNC__
<< ": error reading " << current_op_seq_fn
<< ": " << cpp_strerror(ret
) << dendl
;
1503 VOID_TEMP_FAILURE_RETRY(::close(op_fd
));
1504 ceph_assert(!m_filestore_fail_eio
|| ret
!= -EIO
);
1511 int FileStore::write_op_seq(int fd
, uint64_t seq
)
1514 snprintf(s
, sizeof(s
), "%" PRId64
"\n", seq
);
1515 int ret
= TEMP_FAILURE_RETRY(::pwrite(fd
, s
, strlen(s
), 0));
1518 ceph_assert(!m_filestore_fail_eio
|| ret
!= -EIO
);
1523 int FileStore::mount()
1527 uint64_t initial_op_seq
;
1529 set
<string
> cluster_snaps
;
1530 CompatSet supported_compat_set
= get_fs_supported_compat_set();
1532 dout(5) << "basedir " << basedir
<< " journal " << journalpath
<< dendl
;
1534 ret
= set_throttle_params();
1538 // make sure global base dir exists
1539 if (::access(basedir
.c_str(), R_OK
| W_OK
)) {
1541 derr
<< __FUNC__
<< ": unable to access basedir '" << basedir
<< "': "
1542 << cpp_strerror(ret
) << dendl
;
1547 snprintf(buf
, sizeof(buf
), "%s/fsid", basedir
.c_str());
1548 fsid_fd
= ::open(buf
, O_RDWR
|O_CLOEXEC
, 0644);
1551 derr
<< __FUNC__
<< ": error opening '" << buf
<< "': "
1552 << cpp_strerror(ret
) << dendl
;
1556 ret
= read_fsid(fsid_fd
, &fsid
);
1558 derr
<< __FUNC__
<< ": error reading fsid_fd: " << cpp_strerror(ret
)
1563 if (lock_fsid() < 0) {
1564 derr
<< __FUNC__
<< ": lock_fsid failed" << dendl
;
1569 dout(10) << "mount fsid is " << fsid
<< dendl
;
1572 uint32_t version_stamp
;
1573 ret
= version_stamp_is_valid(&version_stamp
);
1575 derr
<< __FUNC__
<< ": error in version_stamp_is_valid: "
1576 << cpp_strerror(ret
) << dendl
;
1578 } else if (ret
== 0) {
1579 if (do_update
|| (int)version_stamp
< cct
->_conf
->filestore_update_to
) {
1580 derr
<< __FUNC__
<< ": stale version stamp detected: "
1582 << ". Proceeding, do_update "
1583 << "is set, performing disk format upgrade."
1588 derr
<< __FUNC__
<< ": stale version stamp " << version_stamp
1589 << ". Please run the FileStore update script before starting the "
1590 << "OSD, or set filestore_update_to to " << target_version
1591 << " (currently " << cct
->_conf
->filestore_update_to
<< ")"
1597 ret
= read_superblock();
1602 // Check if this FileStore supports all the necessary features to mount
1603 if (supported_compat_set
.compare(superblock
.compat_features
) == -1) {
1604 derr
<< __FUNC__
<< ": Incompatible features set "
1605 << superblock
.compat_features
<< dendl
;
1610 // open some dir handles
1611 basedir_fd
= ::open(basedir
.c_str(), O_RDONLY
|O_CLOEXEC
);
1612 if (basedir_fd
< 0) {
1614 derr
<< __FUNC__
<< ": failed to open " << basedir
<< ": "
1615 << cpp_strerror(ret
) << dendl
;
1620 // test for btrfs, xattrs, etc.
1623 derr
<< __FUNC__
<< ": error in _detect_fs: "
1624 << cpp_strerror(ret
) << dendl
;
1625 goto close_basedir_fd
;
1630 ret
= backend
->list_checkpoints(ls
);
1632 derr
<< __FUNC__
<< ": error in _list_snaps: "<< cpp_strerror(ret
) << dendl
;
1633 goto close_basedir_fd
;
1636 long long unsigned c
, prev
= 0;
1637 char clustersnap
[NAME_MAX
];
1638 for (list
<string
>::iterator it
= ls
.begin(); it
!= ls
.end(); ++it
) {
1639 if (sscanf(it
->c_str(), COMMIT_SNAP_ITEM
, &c
) == 1) {
1640 ceph_assert(c
> prev
);
1643 } else if (sscanf(it
->c_str(), CLUSTER_SNAP_ITEM
, clustersnap
) == 1)
1644 cluster_snaps
.insert(*it
);
1648 if (m_osd_rollback_to_cluster_snap
.length() &&
1649 cluster_snaps
.count(m_osd_rollback_to_cluster_snap
) == 0) {
1650 derr
<< "rollback to cluster snapshot '" << m_osd_rollback_to_cluster_snap
<< "': not found" << dendl
;
1652 goto close_basedir_fd
;
1656 snprintf(nosnapfn
, sizeof(nosnapfn
), "%s/nosnap", current_fn
.c_str());
1658 if (backend
->can_checkpoint()) {
1659 if (snaps
.empty()) {
1660 dout(0) << __FUNC__
<< ": WARNING: no consistent snaps found, store may be in inconsistent state" << dendl
;
1663 uint64_t curr_seq
= 0;
1665 if (m_osd_rollback_to_cluster_snap
.length()) {
1667 << " ** NOTE: rolling back to cluster snapshot " << m_osd_rollback_to_cluster_snap
<< " **"
1670 ceph_assert(cluster_snaps
.count(m_osd_rollback_to_cluster_snap
));
1671 snprintf(s
, sizeof(s
), CLUSTER_SNAP_ITEM
, m_osd_rollback_to_cluster_snap
.c_str());
1674 int fd
= read_op_seq(&curr_seq
);
1676 VOID_TEMP_FAILURE_RETRY(::close(fd
));
1680 dout(10) << " current/ seq was " << curr_seq
<< dendl
;
1682 dout(10) << " current/ missing entirely (unusual, but okay)" << dendl
;
1684 uint64_t cp
= snaps
.back();
1685 dout(10) << " most recent snap from " << snaps
<< " is " << cp
<< dendl
;
1687 // if current/ is marked as non-snapshotted, refuse to roll
1688 // back (without clear direction) to avoid throwing out new
1691 if (::stat(nosnapfn
, &st
) == 0) {
1692 if (!m_osd_use_stale_snap
) {
1693 derr
<< "ERROR: " << nosnapfn
<< " exists, not rolling back to avoid losing new data" << dendl
;
1694 derr
<< "Force rollback to old snapshotted version with 'osd use stale snap = true'" << dendl
;
1695 derr
<< "config option for --osd-use-stale-snap startup argument." << dendl
;
1697 goto close_basedir_fd
;
1699 derr
<< "WARNING: user forced start with data sequence mismatch: current was " << curr_seq
1700 << ", newest snap is " << cp
<< dendl
;
1702 << " ** WARNING: forcing the use of stale snapshot data **"
1703 << TEXT_NORMAL
<< std::endl
;
1706 dout(10) << __FUNC__
<< ": rolling back to consistent snap " << cp
<< dendl
;
1707 snprintf(s
, sizeof(s
), COMMIT_SNAP_ITEM
, (long long unsigned)cp
);
1711 ret
= backend
->rollback_to(s
);
1713 derr
<< __FUNC__
<< ": error rolling back to " << s
<< ": "
1714 << cpp_strerror(ret
) << dendl
;
1715 goto close_basedir_fd
;
1721 current_fd
= ::open(current_fn
.c_str(), O_RDONLY
|O_CLOEXEC
);
1722 if (current_fd
< 0) {
1724 derr
<< __FUNC__
<< ": error opening: " << current_fn
<< ": " << cpp_strerror(ret
) << dendl
;
1725 goto close_basedir_fd
;
1728 ceph_assert(current_fd
>= 0);
1730 op_fd
= read_op_seq(&initial_op_seq
);
1733 derr
<< __FUNC__
<< ": read_op_seq failed" << dendl
;
1734 goto close_current_fd
;
1737 dout(5) << "mount op_seq is " << initial_op_seq
<< dendl
;
1738 if (initial_op_seq
== 0) {
1739 derr
<< "mount initial op seq is 0; something is wrong" << dendl
;
1741 goto close_current_fd
;
1744 if (!backend
->can_checkpoint()) {
1745 // mark current/ as non-snapshotted so that we don't rollback away
1747 int r
= ::creat(nosnapfn
, 0644);
1750 derr
<< __FUNC__
<< ": failed to create current/nosnap" << dendl
;
1751 goto close_current_fd
;
1753 VOID_TEMP_FAILURE_RETRY(::close(r
));
1755 // clear nosnap marker, if present.
1759 // check fsid with omap
1761 char omap_fsid_buf
[PATH_MAX
];
1762 struct ::stat omap_fsid_stat
;
1763 snprintf(omap_fsid_buf
, sizeof(omap_fsid_buf
), "%s/osd_uuid", omap_dir
.c_str());
1764 // if osd_uuid not exists, assume as this omap matchs corresponding osd
1765 if (::stat(omap_fsid_buf
, &omap_fsid_stat
) != 0){
1766 dout(10) << __FUNC__
<< ": osd_uuid not found under omap, "
1767 << "assume as matched."
1771 // if osd_uuid exists, compares osd_uuid with fsid
1772 omap_fsid_fd
= ::open(omap_fsid_buf
, O_RDONLY
|O_CLOEXEC
, 0644);
1773 if (omap_fsid_fd
< 0) {
1775 derr
<< __FUNC__
<< ": error opening '" << omap_fsid_buf
<< "': "
1776 << cpp_strerror(ret
)
1778 goto close_current_fd
;
1780 ret
= read_fsid(omap_fsid_fd
, &omap_fsid
);
1781 VOID_TEMP_FAILURE_RETRY(::close(omap_fsid_fd
));
1783 derr
<< __FUNC__
<< ": error reading omap_fsid_fd"
1784 << ", omap_fsid = " << omap_fsid
1785 << cpp_strerror(ret
)
1787 goto close_current_fd
;
1789 if (fsid
!= omap_fsid
) {
1790 derr
<< __FUNC__
<< ": " << omap_fsid_buf
1791 << " has existed omap fsid " << omap_fsid
1792 << " != expected osd fsid " << fsid
1795 goto close_current_fd
;
1799 dout(0) << "start omap initiation" << dendl
;
1800 if (!(generic_flags
& SKIP_MOUNT_OMAP
)) {
1801 KeyValueDB
* omap_store
= KeyValueDB::create(cct
,
1802 superblock
.omap_backend
,
1806 derr
<< __FUNC__
<< ": Error creating " << superblock
.omap_backend
<< dendl
;
1808 goto close_current_fd
;
1811 if (superblock
.omap_backend
== "rocksdb")
1812 ret
= omap_store
->init(cct
->_conf
->filestore_rocksdb_options
);
1814 ret
= omap_store
->init();
1817 derr
<< __FUNC__
<< ": Error initializing omap_store: " << cpp_strerror(ret
) << dendl
;
1818 goto close_current_fd
;
1822 if (omap_store
->create_and_open(err
)) {
1824 omap_store
= nullptr;
1825 derr
<< __FUNC__
<< ": Error initializing " << superblock
.omap_backend
1826 << " : " << err
.str() << dendl
;
1828 goto close_current_fd
;
1831 DBObjectMap
*dbomap
= new DBObjectMap(cct
, omap_store
);
1832 ret
= dbomap
->init(do_update
);
1836 derr
<< __FUNC__
<< ": Error initializing DBObjectMap: " << ret
<< dendl
;
1837 goto close_current_fd
;
1841 if (cct
->_conf
->filestore_debug_omap_check
&& !dbomap
->check(err2
)) {
1842 derr
<< err2
.str() << dendl
;
1846 goto close_current_fd
;
1848 object_map
.reset(dbomap
);
1854 // select journal mode?
1856 if (!m_filestore_journal_writeahead
&&
1857 !m_filestore_journal_parallel
&&
1858 !m_filestore_journal_trailing
) {
1859 if (!backend
->can_checkpoint()) {
1860 m_filestore_journal_writeahead
= true;
1861 dout(0) << __FUNC__
<< ": enabling WRITEAHEAD journal mode: checkpoint is not enabled" << dendl
;
1863 m_filestore_journal_parallel
= true;
1864 dout(0) << __FUNC__
<< ": enabling PARALLEL journal mode: fs, checkpoint is enabled" << dendl
;
1867 if (m_filestore_journal_writeahead
)
1868 dout(0) << __FUNC__
<< ": WRITEAHEAD journal mode explicitly enabled in conf" << dendl
;
1869 if (m_filestore_journal_parallel
)
1870 dout(0) << __FUNC__
<< ": PARALLEL journal mode explicitly enabled in conf" << dendl
;
1871 if (m_filestore_journal_trailing
)
1872 dout(0) << __FUNC__
<< ": TRAILING journal mode explicitly enabled in conf" << dendl
;
1874 if (m_filestore_journal_writeahead
)
1875 journal
->set_wait_on_full(true);
1877 dout(0) << __FUNC__
<< ": no journal" << dendl
;
1880 ret
= _sanity_check_fs();
1882 derr
<< __FUNC__
<< ": _sanity_check_fs failed with error "
1884 goto close_current_fd
;
1887 // Cleanup possibly invalid collections
1889 vector
<coll_t
> collections
;
1890 ret
= list_collections(collections
, true);
1892 derr
<< "Error " << ret
<< " while listing collections" << dendl
;
1893 goto close_current_fd
;
1895 for (vector
<coll_t
>::iterator i
= collections
.begin();
1896 i
!= collections
.end();
1899 ret
= get_index(*i
, &index
);
1901 derr
<< "Unable to mount index " << *i
1902 << " with error: " << ret
<< dendl
;
1903 goto close_current_fd
;
1905 ceph_assert(index
.index
);
1906 std::unique_lock l
{(index
.index
)->access_lock
};
1911 if (!m_disable_wbthrottle
) {
1914 dout(0) << __FUNC__
<< ": INFO: WbThrottle is disabled" << dendl
;
1915 if (cct
->_conf
->filestore_odsync_write
) {
1916 dout(0) << __FUNC__
<< ": INFO: O_DSYNC write is enabled" << dendl
;
1919 sync_thread
.create("filestore_sync");
1921 if (!(generic_flags
& SKIP_JOURNAL_REPLAY
)) {
1922 ret
= journal_replay(initial_op_seq
);
1924 derr
<< __FUNC__
<< ": failed to open journal " << journalpath
<< ": " << cpp_strerror(ret
) << dendl
;
1925 if (ret
== -ENOTTY
) {
1926 derr
<< "maybe journal is not pointing to a block device and its size "
1927 << "wasn't configured?" << dendl
;
1936 if (cct
->_conf
->filestore_debug_omap_check
&& !object_map
->check(err2
)) {
1937 derr
<< err2
.str() << dendl
;
1943 init_temp_collections();
1948 for (vector
<Finisher
*>::iterator it
= ondisk_finishers
.begin(); it
!= ondisk_finishers
.end(); ++it
) {
1951 for (vector
<Finisher
*>::iterator it
= apply_finishers
.begin(); it
!= apply_finishers
.end(); ++it
) {
1958 if (cct
->_conf
->filestore_update_to
>= (int)get_target_version()) {
1959 int err
= upgrade();
1961 derr
<< "error converting store" << dendl
;
1973 std::lock_guard l
{lock
};
1975 sync_cond
.notify_all();
1978 if (!m_disable_wbthrottle
) {
1982 VOID_TEMP_FAILURE_RETRY(::close(current_fd
));
1985 VOID_TEMP_FAILURE_RETRY(::close(basedir_fd
));
1988 VOID_TEMP_FAILURE_RETRY(::close(fsid_fd
));
1991 ceph_assert(!m_filestore_fail_eio
|| ret
!= -EIO
);
1998 void FileStore::init_temp_collections()
2000 dout(10) << __FUNC__
<< dendl
;
2002 int r
= list_collections(ls
, true);
2003 ceph_assert(r
>= 0);
2005 dout(20) << " ls " << ls
<< dendl
;
2007 SequencerPosition spos
;
2010 for (vector
<coll_t
>::iterator p
= ls
.begin(); p
!= ls
.end(); ++p
)
2013 dout(20) << " temps " << temps
<< dendl
;
2015 for (vector
<coll_t
>::iterator p
= ls
.begin(); p
!= ls
.end(); ++p
) {
2018 coll_map
[*p
] = ceph::make_ref
<OpSequencer
>(cct
, ++next_osr_id
, *p
);
2021 coll_t temp
= p
->get_temp();
2022 if (temps
.count(temp
)) {
2025 dout(10) << __FUNC__
<< ": creating " << temp
<< dendl
;
2026 r
= _create_collection(temp
, 0, spos
);
2027 ceph_assert(r
== 0);
2031 for (set
<coll_t
>::iterator p
= temps
.begin(); p
!= temps
.end(); ++p
) {
2032 dout(10) << __FUNC__
<< ": removing stray " << *p
<< dendl
;
2033 r
= _collection_remove_recursive(*p
, spos
);
2034 ceph_assert(r
== 0);
2038 int FileStore::umount()
2040 dout(5) << __FUNC__
<< ": " << basedir
<< dendl
;
2047 std::lock_guard
l(coll_lock
);
2052 std::lock_guard l
{lock
};
2054 sync_cond
.notify_all();
2057 if (!m_disable_wbthrottle
){
2063 if (!(generic_flags
& SKIP_JOURNAL_REPLAY
))
2064 journal_write_close();
2066 for (vector
<Finisher
*>::iterator it
= ondisk_finishers
.begin(); it
!= ondisk_finishers
.end(); ++it
) {
2069 for (vector
<Finisher
*>::iterator it
= apply_finishers
.begin(); it
!= apply_finishers
.end(); ++it
) {
2074 VOID_TEMP_FAILURE_RETRY(::close(vdo_fd
));
2078 VOID_TEMP_FAILURE_RETRY(::close(fsid_fd
));
2082 VOID_TEMP_FAILURE_RETRY(::close(op_fd
));
2085 if (current_fd
>= 0) {
2086 VOID_TEMP_FAILURE_RETRY(::close(current_fd
));
2089 if (basedir_fd
>= 0) {
2090 VOID_TEMP_FAILURE_RETRY(::close(basedir_fd
));
2102 std::lock_guard l
{sync_entry_timeo_lock
};
2111 /// -----------------------------
2113 // keep OpSequencer handles alive for all time so that a sequence
2114 // that removes a collection and creates a new one will not allow
2115 // two sequencers for the same collection to be alive at once.
2117 ObjectStore::CollectionHandle
FileStore::open_collection(const coll_t
& c
)
2119 std::lock_guard l
{coll_lock
};
2120 auto p
= coll_map
.find(c
);
2121 if (p
== coll_map
.end()) {
2122 return CollectionHandle();
2127 ObjectStore::CollectionHandle
FileStore::create_new_collection(const coll_t
& c
)
2129 std::lock_guard l
{coll_lock
};
2130 auto p
= coll_map
.find(c
);
2131 if (p
== coll_map
.end()) {
2132 auto r
= ceph::make_ref
<OpSequencer
>(cct
, ++next_osr_id
, c
);
2141 /// -----------------------------
2143 FileStore::Op
*FileStore::build_op(vector
<Transaction
>& tls
,
2144 Context
*onreadable
,
2145 Context
*onreadable_sync
,
2146 TrackedOpRef osd_op
)
2148 uint64_t bytes
= 0, ops
= 0;
2149 for (vector
<Transaction
>::iterator p
= tls
.begin();
2152 bytes
+= (*p
).get_num_bytes();
2153 ops
+= (*p
).get_num_ops();
2157 o
->start
= ceph_clock_now();
2158 o
->tls
= std::move(tls
);
2159 o
->onreadable
= onreadable
;
2160 o
->onreadable_sync
= onreadable_sync
;
2169 void FileStore::queue_op(OpSequencer
*osr
, Op
*o
)
2171 // queue op on sequencer, then queue sequencer for the threadpool,
2172 // so that regardless of which order the threads pick up the
2173 // sequencer, the op order will be preserved.
2176 o
->trace
.event("queued");
2178 logger
->inc(l_filestore_ops
);
2179 logger
->inc(l_filestore_bytes
, o
->bytes
);
2181 dout(5) << __FUNC__
<< ": " << o
<< " seq " << o
->op
2183 << " " << o
->bytes
<< " bytes"
2184 << " (queue has " << throttle_ops
.get_current() << " ops and " << throttle_bytes
.get_current() << " bytes)"
2189 void FileStore::op_queue_reserve_throttle(Op
*o
)
2192 throttle_bytes
.get(o
->bytes
);
2194 logger
->set(l_filestore_op_queue_ops
, throttle_ops
.get_current());
2195 logger
->set(l_filestore_op_queue_bytes
, throttle_bytes
.get_current());
2198 void FileStore::op_queue_release_throttle(Op
*o
)
2201 throttle_bytes
.put(o
->bytes
);
2202 logger
->set(l_filestore_op_queue_ops
, throttle_ops
.get_current());
2203 logger
->set(l_filestore_op_queue_bytes
, throttle_bytes
.get_current());
2206 void FileStore::_do_op(OpSequencer
*osr
, ThreadPool::TPHandle
&handle
)
2208 if (!m_disable_wbthrottle
) {
2209 wbthrottle
.throttle();
2212 if (cct
->_conf
->filestore_inject_stall
) {
2213 int orig
= cct
->_conf
->filestore_inject_stall
;
2214 dout(5) << __FUNC__
<< ": filestore_inject_stall " << orig
<< ", sleeping" << dendl
;
2216 cct
->_conf
.set_val("filestore_inject_stall", "0");
2217 dout(5) << __FUNC__
<< ": done stalling" << dendl
;
2220 osr
->apply_lock
.lock();
2221 Op
*o
= osr
->peek_queue();
2222 o
->trace
.event("op_apply_start");
2223 apply_manager
.op_apply_start(o
->op
);
2224 dout(5) << __FUNC__
<< ": " << o
<< " seq " << o
->op
<< " " << *osr
<< " start" << dendl
;
2225 o
->trace
.event("_do_transactions start");
2226 int r
= _do_transactions(o
->tls
, o
->op
, &handle
, osr
->osr_name
);
2227 o
->trace
.event("op_apply_finish");
2228 apply_manager
.op_apply_finish(o
->op
);
2229 dout(10) << __FUNC__
<< ": " << o
<< " seq " << o
->op
<< " r = " << r
2230 << ", finisher " << o
->onreadable
<< " " << o
->onreadable_sync
<< dendl
;
2233 void FileStore::_finish_op(OpSequencer
*osr
)
2235 list
<Context
*> to_queue
;
2236 Op
*o
= osr
->dequeue(&to_queue
);
2240 utime_t lat
= ceph_clock_now();
2243 dout(10) << __FUNC__
<< ": " << o
<< " seq " << o
->op
<< " " << *osr
<< " lat " << lat
<< dendl
;
2244 osr
->apply_lock
.unlock(); // locked in _do_op
2245 o
->trace
.event("_finish_op");
2247 // called with tp lock held
2248 op_queue_release_throttle(o
);
2250 logger
->tinc(l_filestore_apply_latency
, lat
);
2252 if (o
->onreadable_sync
) {
2253 o
->onreadable_sync
->complete(0);
2255 if (o
->onreadable
) {
2256 apply_finishers
[osr
->id
% m_apply_finisher_num
]->queue(o
->onreadable
);
2258 if (!to_queue
.empty()) {
2259 apply_finishers
[osr
->id
% m_apply_finisher_num
]->queue(to_queue
);
2265 struct C_JournaledAhead
: public Context
{
2267 FileStore::OpSequencer
*osr
;
2271 C_JournaledAhead(FileStore
*f
, FileStore::OpSequencer
*os
, FileStore::Op
*o
, Context
*ondisk
):
2272 fs(f
), osr(os
), o(o
), ondisk(ondisk
) { }
2273 void finish(int r
) override
{
2274 fs
->_journaled_ahead(osr
, o
, ondisk
);
2278 int FileStore::queue_transactions(CollectionHandle
& ch
, vector
<Transaction
>& tls
,
2279 TrackedOpRef osd_op
,
2280 ThreadPool::TPHandle
*handle
)
2282 Context
*onreadable
;
2284 Context
*onreadable_sync
;
2285 ObjectStore::Transaction::collect_contexts(
2286 tls
, &onreadable
, &ondisk
, &onreadable_sync
);
2288 if (cct
->_conf
->objectstore_blackhole
) {
2289 dout(0) << __FUNC__
<< ": objectstore_blackhole = TRUE, dropping transaction"
2294 onreadable
= nullptr;
2295 delete onreadable_sync
;
2296 onreadable_sync
= nullptr;
2300 utime_t start
= ceph_clock_now();
2302 OpSequencer
*osr
= static_cast<OpSequencer
*>(ch
.get());
2303 dout(5) << __FUNC__
<< ": osr " << osr
<< " " << *osr
<< dendl
;
2305 ZTracer::Trace trace
;
2306 if (osd_op
&& osd_op
->pg_trace
) {
2307 osd_op
->store_trace
.init("filestore op", &trace_endpoint
, &osd_op
->pg_trace
);
2308 trace
= osd_op
->store_trace
;
2311 if (journal
&& journal
->is_writeable() && !m_filestore_journal_trailing
) {
2312 Op
*o
= build_op(tls
, onreadable
, onreadable_sync
, osd_op
);
2314 //prepare and encode transactions data out of lock
2316 int orig_len
= journal
->prepare_entry(o
->tls
, &tbl
);
2319 handle
->suspend_tp_timeout();
2321 op_queue_reserve_throttle(o
);
2322 journal
->reserve_throttle_and_backoff(tbl
.length());
2325 handle
->reset_tp_timeout();
2327 uint64_t op_num
= submit_manager
.op_submit_start();
2329 trace
.keyval("opnum", op_num
);
2331 if (m_filestore_do_dump
)
2332 dump_transactions(o
->tls
, o
->op
, osr
);
2334 if (m_filestore_journal_parallel
) {
2335 dout(5) << __FUNC__
<< ": (parallel) " << o
->op
<< " " << o
->tls
<< dendl
;
2337 trace
.keyval("journal mode", "parallel");
2338 trace
.event("journal started");
2339 _op_journal_transactions(tbl
, orig_len
, o
->op
, ondisk
, osd_op
);
2341 // queue inside submit_manager op submission lock
2343 trace
.event("op queued");
2344 } else if (m_filestore_journal_writeahead
) {
2345 dout(5) << __FUNC__
<< ": (writeahead) " << o
->op
<< " " << o
->tls
<< dendl
;
2347 osr
->queue_journal(o
);
2349 trace
.keyval("journal mode", "writeahead");
2350 trace
.event("journal started");
2351 _op_journal_transactions(tbl
, orig_len
, o
->op
,
2352 new C_JournaledAhead(this, osr
, o
, ondisk
),
2357 submit_manager
.op_submit_finish(op_num
);
2358 utime_t end
= ceph_clock_now();
2359 logger
->tinc(l_filestore_queue_transaction_latency_avg
, end
- start
);
2364 Op
*o
= build_op(tls
, onreadable
, onreadable_sync
, osd_op
);
2365 dout(5) << __FUNC__
<< ": (no journal) " << o
<< " " << tls
<< dendl
;
2368 handle
->suspend_tp_timeout();
2370 op_queue_reserve_throttle(o
);
2373 handle
->reset_tp_timeout();
2375 uint64_t op_num
= submit_manager
.op_submit_start();
2378 if (m_filestore_do_dump
)
2379 dump_transactions(o
->tls
, o
->op
, osr
);
2382 trace
.keyval("opnum", op_num
);
2383 trace
.keyval("journal mode", "none");
2384 trace
.event("op queued");
2387 apply_manager
.add_waiter(op_num
, ondisk
);
2388 submit_manager
.op_submit_finish(op_num
);
2389 utime_t end
= ceph_clock_now();
2390 logger
->tinc(l_filestore_queue_transaction_latency_avg
, end
- start
);
2394 ceph_assert(journal
);
2395 //prepare and encode transactions data out of lock
2398 if (journal
->is_writeable()) {
2399 orig_len
= journal
->prepare_entry(tls
, &tbl
);
2401 uint64_t op
= submit_manager
.op_submit_start();
2402 dout(5) << __FUNC__
<< ": (trailing journal) " << op
<< " " << tls
<< dendl
;
2404 if (m_filestore_do_dump
)
2405 dump_transactions(tls
, op
, osr
);
2407 trace
.event("op_apply_start");
2408 trace
.keyval("opnum", op
);
2409 trace
.keyval("journal mode", "trailing");
2410 apply_manager
.op_apply_start(op
);
2411 trace
.event("do_transactions");
2412 int r
= do_transactions(tls
, op
);
2415 trace
.event("journal started");
2416 _op_journal_transactions(tbl
, orig_len
, op
, ondisk
, osd_op
);
2422 // start on_readable finisher after we queue journal item, as on_readable callback
2423 // is allowed to delete the Transaction
2424 if (onreadable_sync
) {
2425 onreadable_sync
->complete(r
);
2427 apply_finishers
[osr
->id
% m_apply_finisher_num
]->queue(onreadable
, r
);
2429 submit_manager
.op_submit_finish(op
);
2430 trace
.event("op_apply_finish");
2431 apply_manager
.op_apply_finish(op
);
2433 utime_t end
= ceph_clock_now();
2434 logger
->tinc(l_filestore_queue_transaction_latency_avg
, end
- start
);
2438 void FileStore::_journaled_ahead(OpSequencer
*osr
, Op
*o
, Context
*ondisk
)
2440 dout(5) << __FUNC__
<< ": " << o
<< " seq " << o
->op
<< " " << *osr
<< " " << o
->tls
<< dendl
;
2442 o
->trace
.event("writeahead journal finished");
2444 // this should queue in order because the journal does it's completions in order.
2447 list
<Context
*> to_queue
;
2448 osr
->dequeue_journal(&to_queue
);
2450 // do ondisk completions async, to prevent any onreadable_sync completions
2451 // getting blocked behind an ondisk completion.
2453 dout(10) << " queueing ondisk " << ondisk
<< dendl
;
2454 ondisk_finishers
[osr
->id
% m_ondisk_finisher_num
]->queue(ondisk
);
2456 if (!to_queue
.empty()) {
2457 ondisk_finishers
[osr
->id
% m_ondisk_finisher_num
]->queue(to_queue
);
2461 int FileStore::_do_transactions(
2462 vector
<Transaction
> &tls
,
2464 ThreadPool::TPHandle
*handle
,
2465 const char *osr_name
)
2469 for (vector
<Transaction
>::iterator p
= tls
.begin();
2472 _do_transaction(*p
, op_seq
, trans_num
, handle
, osr_name
);
2474 handle
->reset_tp_timeout();
2480 void FileStore::_set_global_replay_guard(const coll_t
& cid
,
2481 const SequencerPosition
&spos
)
2483 if (backend
->can_checkpoint())
2486 // sync all previous operations on this sequencer
2487 int ret
= object_map
->sync();
2489 derr
<< __FUNC__
<< ": omap sync error " << cpp_strerror(ret
) << dendl
;
2490 ceph_abort_msg("_set_global_replay_guard failed");
2492 ret
= sync_filesystem(basedir_fd
);
2494 derr
<< __FUNC__
<< ": sync_filesystem error " << cpp_strerror(ret
) << dendl
;
2495 ceph_abort_msg("_set_global_replay_guard failed");
2499 get_cdir(cid
, fn
, sizeof(fn
));
2500 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
);
2503 derr
<< __FUNC__
<< ": " << cid
<< " error " << cpp_strerror(err
) << dendl
;
2504 ceph_abort_msg("_set_global_replay_guard failed");
2509 // then record that we did it
2512 int r
= chain_fsetxattr
<true, true>(
2513 fd
, GLOBAL_REPLAY_GUARD_XATTR
, v
.c_str(), v
.length());
2515 derr
<< __FUNC__
<< ": fsetxattr " << GLOBAL_REPLAY_GUARD_XATTR
2516 << " got " << cpp_strerror(r
) << dendl
;
2517 ceph_abort_msg("fsetxattr failed");
2520 // and make sure our xattr is durable.
2523 derr
<< __func__
<< " fsync failed: " << cpp_strerror(errno
) << dendl
;
2529 VOID_TEMP_FAILURE_RETRY(::close(fd
));
2530 dout(10) << __FUNC__
<< ": " << spos
<< " done" << dendl
;
2533 int FileStore::_check_global_replay_guard(const coll_t
& cid
,
2534 const SequencerPosition
& spos
)
2537 get_cdir(cid
, fn
, sizeof(fn
));
2538 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
);
2540 dout(10) << __FUNC__
<< ": " << cid
<< " dne" << dendl
;
2541 return 1; // if collection does not exist, there is no guard, and we can replay.
2545 int r
= chain_fgetxattr(fd
, GLOBAL_REPLAY_GUARD_XATTR
, buf
, sizeof(buf
));
2547 dout(20) << __FUNC__
<< ": no xattr" << dendl
;
2548 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
2549 VOID_TEMP_FAILURE_RETRY(::close(fd
));
2550 return 1; // no xattr
2555 SequencerPosition opos
;
2556 auto p
= bl
.cbegin();
2559 VOID_TEMP_FAILURE_RETRY(::close(fd
));
2560 return spos
>= opos
? 1 : -1;
2564 void FileStore::_set_replay_guard(const coll_t
& cid
,
2565 const SequencerPosition
&spos
,
2566 bool in_progress
=false)
2569 get_cdir(cid
, fn
, sizeof(fn
));
2570 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
);
2573 derr
<< __FUNC__
<< ": " << cid
<< " error " << cpp_strerror(err
) << dendl
;
2574 ceph_abort_msg("_set_replay_guard failed");
2576 _set_replay_guard(fd
, spos
, 0, in_progress
);
2577 VOID_TEMP_FAILURE_RETRY(::close(fd
));
2581 void FileStore::_set_replay_guard(int fd
,
2582 const SequencerPosition
& spos
,
2583 const ghobject_t
*hoid
,
2586 if (backend
->can_checkpoint())
2589 dout(10) << __FUNC__
<< ": " << spos
<< (in_progress
? " START" : "") << dendl
;
2593 // first make sure the previous operation commits
2594 int r
= ::fsync(fd
);
2596 derr
<< __func__
<< " fsync failed: " << cpp_strerror(errno
) << dendl
;
2601 // sync object_map too. even if this object has a header or keys,
2602 // it have had them in the past and then removed them, so always
2604 object_map
->sync(hoid
, &spos
);
2609 // then record that we did it
2612 encode(in_progress
, v
);
2613 r
= chain_fsetxattr
<true, true>(
2614 fd
, REPLAY_GUARD_XATTR
, v
.c_str(), v
.length());
2616 derr
<< "fsetxattr " << REPLAY_GUARD_XATTR
<< " got " << cpp_strerror(r
) << dendl
;
2617 ceph_abort_msg("fsetxattr failed");
2620 // and make sure our xattr is durable.
2623 derr
<< __func__
<< " fsync failed: " << cpp_strerror(errno
) << dendl
;
2629 dout(10) << __FUNC__
<< ": " << spos
<< " done" << dendl
;
2632 void FileStore::_close_replay_guard(const coll_t
& cid
,
2633 const SequencerPosition
&spos
)
2636 get_cdir(cid
, fn
, sizeof(fn
));
2637 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
);
2640 derr
<< __FUNC__
<< ": " << cid
<< " error " << cpp_strerror(err
) << dendl
;
2641 ceph_abort_msg("_close_replay_guard failed");
2643 _close_replay_guard(fd
, spos
);
2644 VOID_TEMP_FAILURE_RETRY(::close(fd
));
// Close out a replay guard on an fd: rewrite REPLAY_GUARD_XATTR with
// in_progress=false to record that the guarded operation completed.
// Mirrors _set_replay_guard (sync object_map, set xattr, fsync).
// NOTE(review): lossy extraction -- interior guard/brace lines are
// missing from this chunk.
2647 void FileStore::_close_replay_guard(int fd
, const SequencerPosition
& spos
,
2648 const ghobject_t
*hoid
)
// Checkpointing backends skip guards entirely (see can_checkpoint()).
2650 if (backend
->can_checkpoint())
2653 dout(10) << __FUNC__
<< ": " << spos
<< dendl
;
2657 // sync object_map too. even if this object has a header or keys,
2658 // it have had them in the past and then removed them, so always
2660 object_map
->sync(hoid
, &spos
);
2662 // then record that we are done with this operation
// in_progress=false marks the operation complete in the guard xattr.
2665 bool in_progress
= false;
2666 encode(in_progress
, v
);
2667 int r
= chain_fsetxattr
<true, true>(
2668 fd
, REPLAY_GUARD_XATTR
, v
.c_str(), v
.length());
2670 derr
<< "fsetxattr " << REPLAY_GUARD_XATTR
<< " got " << cpp_strerror(r
) << dendl
;
2671 ceph_abort_msg("fsetxattr failed");
2674 // and make sure our xattr is durable.
2677 derr
<< __func__
<< " fsync failed: " << cpp_strerror(errno
) << dendl
;
2683 dout(10) << __FUNC__
<< ": " << spos
<< " done" << dendl
;
// Per-object replay-guard check: consult the collection-wide guard
// first, then open the object and inspect its guard xattr via the fd
// overload.  Returns 1 ("replay") when not replaying, when the backend
// can checkpoint, or when the object file does not exist.
2686 int FileStore::_check_replay_guard(const coll_t
& cid
, const ghobject_t
&oid
,
2687 const SequencerPosition
& spos
)
2689 if (!replaying
|| backend
->can_checkpoint())
2692 int r
= _check_global_replay_guard(cid
, spos
);
2697 r
= lfn_open(cid
, oid
, false, &fd
);
2699 dout(10) << __FUNC__
<< ": " << cid
<< " " << oid
<< " dne" << dendl
;
2700 return 1; // if file does not exist, there is no guard, and we can replay.
// Delegate the xattr inspection to the fd-based overload; the fd is
// a handle type dereferenced twice (**fd) to reach the raw int.
2702 int ret
= _check_replay_guard(**fd
, spos
);
// Collection-level replay-guard check: open the collection directory
// and inspect its guard xattr via the fd overload.  Returns 1
// ("replay") when not replaying, when checkpointing, or when the
// collection directory does not exist.
2707 int FileStore::_check_replay_guard(const coll_t
& cid
, const SequencerPosition
& spos
)
2709 if (!replaying
|| backend
->can_checkpoint())
2713 get_cdir(cid
, fn
, sizeof(fn
));
2714 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
);
2716 dout(10) << __FUNC__
<< ": " << cid
<< " dne" << dendl
;
2717 return 1; // if collection does not exist, there is no guard, and we can replay.
2719 int ret
= _check_replay_guard(fd
, spos
);
// Close the directory fd regardless of the check's outcome.
2720 VOID_TEMP_FAILURE_RETRY(::close(fd
));
// Core replay-guard decision: read REPLAY_GUARD_XATTR from the fd,
// decode the stored SequencerPosition (opos) and in_progress flag,
// and compare against the current replay position (spos).  The dout
// messages below document the outcomes: opos > spos -> skip replay;
// opos == spos -> conditional replay iff in_progress; opos < spos ->
// replay.  NOTE(review): the actual return statements for each branch
// were dropped by the extraction; semantics are inferred from the
// visible log strings -- confirm against full source.
2724 int FileStore::_check_replay_guard(int fd
, const SequencerPosition
& spos
)
2726 if (!replaying
|| backend
->can_checkpoint())
2730 int r
= chain_fgetxattr(fd
, REPLAY_GUARD_XATTR
, buf
, sizeof(buf
));
// No guard xattr at all: safe to replay.
2732 dout(20) << __FUNC__
<< ": no xattr" << dendl
;
2733 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
2734 return 1; // no xattr
2739 SequencerPosition opos
;
2740 auto p
= bl
.cbegin();
2742 bool in_progress
= false;
2743 if (!p
.end()) // older journals don't have this
2744 decode(in_progress
, p
);
2746 dout(10) << __FUNC__
<< ": object has " << opos
<< " > current pos " << spos
2747 << ", now or in future, SKIPPING REPLAY" << dendl
;
2749 } else if (opos
== spos
) {
2751 dout(10) << __FUNC__
<< ": object has " << opos
<< " == current pos " << spos
2752 << ", in_progress=true, CONDITIONAL REPLAY" << dendl
;
2755 dout(10) << __FUNC__
<< ": object has " << opos
<< " == current pos " << spos
2756 << ", in_progress=false, SKIPPING REPLAY" << dendl
;
2760 dout(10) << __FUNC__
<< ": object has " << opos
<< " < current pos " << spos
2761 << ", in past, will replay" << dendl
;
2766 void FileStore::_do_transaction(
2767 Transaction
& t
, uint64_t op_seq
, int trans_num
,
2768 ThreadPool::TPHandle
*handle
,
2769 const char *osr_name
)
2771 dout(10) << __FUNC__
<< ": on " << &t
<< dendl
;
2773 Transaction::iterator i
= t
.begin();
2775 SequencerPosition
spos(op_seq
, trans_num
, 0);
2776 while (i
.have_op()) {
2778 handle
->reset_tp_timeout();
2780 Transaction::Op
*op
= i
.decode_op();
2786 case Transaction::OP_NOP
:
2788 case Transaction::OP_TOUCH
:
2789 case Transaction::OP_CREATE
:
2791 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2792 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2793 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2794 _cid
: _cid
.get_temp();
2795 tracepoint(objectstore
, touch_enter
, osr_name
);
2796 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2797 r
= _touch(cid
, oid
);
2798 tracepoint(objectstore
, touch_exit
, r
);
2802 case Transaction::OP_WRITE
:
2804 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2805 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2806 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2807 _cid
: _cid
.get_temp();
2808 uint64_t off
= op
->off
;
2809 uint64_t len
= op
->len
;
2810 uint32_t fadvise_flags
= i
.get_fadvise_flags();
2813 tracepoint(objectstore
, write_enter
, osr_name
, off
, len
);
2814 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2815 r
= _write(cid
, oid
, off
, len
, bl
, fadvise_flags
);
2816 tracepoint(objectstore
, write_exit
, r
);
2820 case Transaction::OP_ZERO
:
2822 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2823 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2824 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2825 _cid
: _cid
.get_temp();
2826 uint64_t off
= op
->off
;
2827 uint64_t len
= op
->len
;
2828 tracepoint(objectstore
, zero_enter
, osr_name
, off
, len
);
2829 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2830 r
= _zero(cid
, oid
, off
, len
);
2831 tracepoint(objectstore
, zero_exit
, r
);
2835 case Transaction::OP_TRIMCACHE
:
2837 // deprecated, no-op
2841 case Transaction::OP_TRUNCATE
:
2843 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2844 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2845 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2846 _cid
: _cid
.get_temp();
2847 uint64_t off
= op
->off
;
2848 tracepoint(objectstore
, truncate_enter
, osr_name
, off
);
2849 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2850 r
= _truncate(cid
, oid
, off
);
2851 tracepoint(objectstore
, truncate_exit
, r
);
2855 case Transaction::OP_REMOVE
:
2857 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2858 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2859 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2860 _cid
: _cid
.get_temp();
2861 tracepoint(objectstore
, remove_enter
, osr_name
);
2862 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2863 r
= _remove(cid
, oid
, spos
);
2864 tracepoint(objectstore
, remove_exit
, r
);
2868 case Transaction::OP_SETATTR
:
2870 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2871 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2872 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2873 _cid
: _cid
.get_temp();
2874 string name
= i
.decode_string();
2877 tracepoint(objectstore
, setattr_enter
, osr_name
);
2878 if (_check_replay_guard(cid
, oid
, spos
) > 0) {
2879 map
<string
, bufferptr
> to_set
;
2880 to_set
[name
] = bufferptr(bl
.c_str(), bl
.length());
2881 r
= _setattrs(cid
, oid
, to_set
, spos
);
2883 dout(0) << " ENOSPC on setxattr on " << cid
<< "/" << oid
2884 << " name " << name
<< " size " << bl
.length() << dendl
;
2886 tracepoint(objectstore
, setattr_exit
, r
);
2890 case Transaction::OP_SETATTRS
:
2892 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2893 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2894 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2895 _cid
: _cid
.get_temp();
2896 map
<string
, bufferptr
> aset
;
2897 i
.decode_attrset(aset
);
2898 tracepoint(objectstore
, setattrs_enter
, osr_name
);
2899 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2900 r
= _setattrs(cid
, oid
, aset
, spos
);
2901 tracepoint(objectstore
, setattrs_exit
, r
);
2903 dout(0) << " ENOSPC on setxattrs on " << cid
<< "/" << oid
<< dendl
;
2907 case Transaction::OP_RMATTR
:
2909 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2910 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2911 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2912 _cid
: _cid
.get_temp();
2913 string name
= i
.decode_string();
2914 tracepoint(objectstore
, rmattr_enter
, osr_name
);
2915 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2916 r
= _rmattr(cid
, oid
, name
.c_str(), spos
);
2917 tracepoint(objectstore
, rmattr_exit
, r
);
2921 case Transaction::OP_RMATTRS
:
2923 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2924 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2925 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2926 _cid
: _cid
.get_temp();
2927 tracepoint(objectstore
, rmattrs_enter
, osr_name
);
2928 if (_check_replay_guard(cid
, oid
, spos
) > 0)
2929 r
= _rmattrs(cid
, oid
, spos
);
2930 tracepoint(objectstore
, rmattrs_exit
, r
);
2934 case Transaction::OP_CLONE
:
2936 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2937 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2938 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2939 _cid
: _cid
.get_temp();
2940 const ghobject_t
&noid
= i
.get_oid(op
->dest_oid
);
2941 tracepoint(objectstore
, clone_enter
, osr_name
);
2942 r
= _clone(cid
, oid
, noid
, spos
);
2943 tracepoint(objectstore
, clone_exit
, r
);
2947 case Transaction::OP_CLONERANGE
:
2949 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2950 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2951 const ghobject_t
&noid
= i
.get_oid(op
->dest_oid
);
2952 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2953 _cid
: _cid
.get_temp();
2954 const coll_t
&ncid
= !_need_temp_object_collection(_cid
, noid
) ?
2955 _cid
: _cid
.get_temp();
2956 uint64_t off
= op
->off
;
2957 uint64_t len
= op
->len
;
2958 tracepoint(objectstore
, clone_range_enter
, osr_name
, len
);
2959 r
= _clone_range(cid
, oid
, ncid
, noid
, off
, len
, off
, spos
);
2960 tracepoint(objectstore
, clone_range_exit
, r
);
2964 case Transaction::OP_CLONERANGE2
:
2966 const coll_t
&_cid
= i
.get_cid(op
->cid
);
2967 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
2968 const ghobject_t
&noid
= i
.get_oid(op
->dest_oid
);
2969 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
2970 _cid
: _cid
.get_temp();
2971 const coll_t
&ncid
= !_need_temp_object_collection(_cid
, noid
) ?
2972 _cid
: _cid
.get_temp();
2973 uint64_t srcoff
= op
->off
;
2974 uint64_t len
= op
->len
;
2975 uint64_t dstoff
= op
->dest_off
;
2976 tracepoint(objectstore
, clone_range2_enter
, osr_name
, len
);
2977 r
= _clone_range(cid
, oid
, ncid
, noid
, srcoff
, len
, dstoff
, spos
);
2978 tracepoint(objectstore
, clone_range2_exit
, r
);
2982 case Transaction::OP_MKCOLL
:
2984 const coll_t
&cid
= i
.get_cid(op
->cid
);
2985 tracepoint(objectstore
, mkcoll_enter
, osr_name
);
2986 if (_check_replay_guard(cid
, spos
) > 0)
2987 r
= _create_collection(cid
, op
->split_bits
, spos
);
2988 tracepoint(objectstore
, mkcoll_exit
, r
);
2992 case Transaction::OP_COLL_SET_BITS
:
2994 const coll_t
&cid
= i
.get_cid(op
->cid
);
2995 int bits
= op
->split_bits
;
2996 r
= _collection_set_bits(cid
, bits
);
3000 case Transaction::OP_COLL_HINT
:
3002 const coll_t
&cid
= i
.get_cid(op
->cid
);
3003 uint32_t type
= op
->hint_type
;
3006 auto hiter
= hint
.cbegin();
3007 if (type
== Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS
) {
3010 decode(pg_num
, hiter
);
3011 decode(num_objs
, hiter
);
3012 if (_check_replay_guard(cid
, spos
) > 0) {
3013 r
= _collection_hint_expected_num_objs(cid
, pg_num
, num_objs
, spos
);
3017 dout(10) << "Unrecognized collection hint type: " << type
<< dendl
;
3022 case Transaction::OP_RMCOLL
:
3024 const coll_t
&cid
= i
.get_cid(op
->cid
);
3025 tracepoint(objectstore
, rmcoll_enter
, osr_name
);
3026 if (_check_replay_guard(cid
, spos
) > 0)
3027 r
= _destroy_collection(cid
);
3028 tracepoint(objectstore
, rmcoll_exit
, r
);
3032 case Transaction::OP_COLL_ADD
:
3034 const coll_t
&ocid
= i
.get_cid(op
->cid
);
3035 const coll_t
&ncid
= i
.get_cid(op
->dest_cid
);
3036 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3038 ceph_assert(oid
.hobj
.pool
>= -1);
3040 // always followed by OP_COLL_REMOVE
3041 Transaction::Op
*op2
= i
.decode_op();
3042 const coll_t
&ocid2
= i
.get_cid(op2
->cid
);
3043 const ghobject_t
&oid2
= i
.get_oid(op2
->oid
);
3044 ceph_assert(op2
->op
== Transaction::OP_COLL_REMOVE
);
3045 ceph_assert(ocid2
== ocid
);
3046 ceph_assert(oid2
== oid
);
3048 tracepoint(objectstore
, coll_add_enter
);
3049 r
= _collection_add(ncid
, ocid
, oid
, spos
);
3050 tracepoint(objectstore
, coll_add_exit
, r
);
3054 tracepoint(objectstore
, coll_remove_enter
, osr_name
);
3055 if (_check_replay_guard(ocid
, oid
, spos
) > 0)
3056 r
= _remove(ocid
, oid
, spos
);
3057 tracepoint(objectstore
, coll_remove_exit
, r
);
3061 case Transaction::OP_COLL_MOVE
:
3063 // WARNING: this is deprecated and buggy; only here to replay old journals.
3064 const coll_t
&ocid
= i
.get_cid(op
->cid
);
3065 const coll_t
&ncid
= i
.get_cid(op
->dest_cid
);
3066 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3067 tracepoint(objectstore
, coll_move_enter
);
3068 r
= _collection_add(ocid
, ncid
, oid
, spos
);
3070 (_check_replay_guard(ocid
, oid
, spos
) > 0))
3071 r
= _remove(ocid
, oid
, spos
);
3072 tracepoint(objectstore
, coll_move_exit
, r
);
3076 case Transaction::OP_COLL_MOVE_RENAME
:
3078 const coll_t
&_oldcid
= i
.get_cid(op
->cid
);
3079 const ghobject_t
&oldoid
= i
.get_oid(op
->oid
);
3080 const coll_t
&_newcid
= i
.get_cid(op
->dest_cid
);
3081 const ghobject_t
&newoid
= i
.get_oid(op
->dest_oid
);
3082 const coll_t
&oldcid
= !_need_temp_object_collection(_oldcid
, oldoid
) ?
3083 _oldcid
: _oldcid
.get_temp();
3084 const coll_t
&newcid
= !_need_temp_object_collection(_newcid
, newoid
) ?
3085 _oldcid
: _newcid
.get_temp();
3086 tracepoint(objectstore
, coll_move_rename_enter
);
3087 r
= _collection_move_rename(oldcid
, oldoid
, newcid
, newoid
, spos
);
3088 tracepoint(objectstore
, coll_move_rename_exit
, r
);
3092 case Transaction::OP_TRY_RENAME
:
3094 const coll_t
&_cid
= i
.get_cid(op
->cid
);
3095 const ghobject_t
&oldoid
= i
.get_oid(op
->oid
);
3096 const ghobject_t
&newoid
= i
.get_oid(op
->dest_oid
);
3097 const coll_t
&oldcid
= !_need_temp_object_collection(_cid
, oldoid
) ?
3098 _cid
: _cid
.get_temp();
3099 const coll_t
&newcid
= !_need_temp_object_collection(_cid
, newoid
) ?
3100 _cid
: _cid
.get_temp();
3101 tracepoint(objectstore
, coll_try_rename_enter
);
3102 r
= _collection_move_rename(oldcid
, oldoid
, newcid
, newoid
, spos
, true);
3103 tracepoint(objectstore
, coll_try_rename_exit
, r
);
3107 case Transaction::OP_COLL_SETATTR
:
3108 case Transaction::OP_COLL_RMATTR
:
3109 ceph_abort_msg("collection attr methods no longer implemented");
3112 case Transaction::OP_COLL_RENAME
:
3118 case Transaction::OP_OMAP_CLEAR
:
3120 const coll_t
&_cid
= i
.get_cid(op
->cid
);
3121 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3122 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
3123 _cid
: _cid
.get_temp();
3124 tracepoint(objectstore
, omap_clear_enter
, osr_name
);
3125 if (_check_replay_guard(cid
, oid
, spos
) > 0)
3126 r
= _omap_clear(cid
, oid
, spos
);
3127 tracepoint(objectstore
, omap_clear_exit
, r
);
3130 case Transaction::OP_OMAP_SETKEYS
:
3132 const coll_t
&_cid
= i
.get_cid(op
->cid
);
3133 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3134 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
3135 _cid
: _cid
.get_temp();
3136 map
<string
, bufferlist
> aset
;
3137 i
.decode_attrset(aset
);
3138 tracepoint(objectstore
, omap_setkeys_enter
, osr_name
);
3139 if (_check_replay_guard(cid
, oid
, spos
) > 0)
3140 r
= _omap_setkeys(cid
, oid
, aset
, spos
);
3141 tracepoint(objectstore
, omap_setkeys_exit
, r
);
3144 case Transaction::OP_OMAP_RMKEYS
:
3146 const coll_t
&_cid
= i
.get_cid(op
->cid
);
3147 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3148 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
3149 _cid
: _cid
.get_temp();
3151 i
.decode_keyset(keys
);
3152 tracepoint(objectstore
, omap_rmkeys_enter
, osr_name
);
3153 if (_check_replay_guard(cid
, oid
, spos
) > 0)
3154 r
= _omap_rmkeys(cid
, oid
, keys
, spos
);
3155 tracepoint(objectstore
, omap_rmkeys_exit
, r
);
3158 case Transaction::OP_OMAP_RMKEYRANGE
:
3160 const coll_t
&_cid
= i
.get_cid(op
->cid
);
3161 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3162 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
3163 _cid
: _cid
.get_temp();
3165 first
= i
.decode_string();
3166 last
= i
.decode_string();
3167 tracepoint(objectstore
, omap_rmkeyrange_enter
, osr_name
);
3168 if (_check_replay_guard(cid
, oid
, spos
) > 0)
3169 r
= _omap_rmkeyrange(cid
, oid
, first
, last
, spos
);
3170 tracepoint(objectstore
, omap_rmkeyrange_exit
, r
);
3173 case Transaction::OP_OMAP_SETHEADER
:
3175 const coll_t
&_cid
= i
.get_cid(op
->cid
);
3176 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3177 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
3178 _cid
: _cid
.get_temp();
3181 tracepoint(objectstore
, omap_setheader_enter
, osr_name
);
3182 if (_check_replay_guard(cid
, oid
, spos
) > 0)
3183 r
= _omap_setheader(cid
, oid
, bl
, spos
);
3184 tracepoint(objectstore
, omap_setheader_exit
, r
);
3187 case Transaction::OP_SPLIT_COLLECTION
:
3189 ceph_abort_msg("not legacy journal; upgrade to firefly first");
3192 case Transaction::OP_SPLIT_COLLECTION2
:
3194 coll_t cid
= i
.get_cid(op
->cid
);
3195 uint32_t bits
= op
->split_bits
;
3196 uint32_t rem
= op
->split_rem
;
3197 coll_t dest
= i
.get_cid(op
->dest_cid
);
3198 tracepoint(objectstore
, split_coll2_enter
, osr_name
);
3199 r
= _split_collection(cid
, bits
, rem
, dest
, spos
);
3200 tracepoint(objectstore
, split_coll2_exit
, r
);
3204 case Transaction::OP_MERGE_COLLECTION
:
3206 coll_t cid
= i
.get_cid(op
->cid
);
3207 uint32_t bits
= op
->split_bits
;
3208 coll_t dest
= i
.get_cid(op
->dest_cid
);
3209 tracepoint(objectstore
, merge_coll_enter
, osr_name
);
3210 r
= _merge_collection(cid
, bits
, dest
, spos
);
3211 tracepoint(objectstore
, merge_coll_exit
, r
);
3215 case Transaction::OP_SETALLOCHINT
:
3217 const coll_t
&_cid
= i
.get_cid(op
->cid
);
3218 const ghobject_t
&oid
= i
.get_oid(op
->oid
);
3219 const coll_t
&cid
= !_need_temp_object_collection(_cid
, oid
) ?
3220 _cid
: _cid
.get_temp();
3221 uint64_t expected_object_size
= op
->expected_object_size
;
3222 uint64_t expected_write_size
= op
->expected_write_size
;
3223 tracepoint(objectstore
, setallochint_enter
, osr_name
);
3224 if (_check_replay_guard(cid
, oid
, spos
) > 0)
3225 r
= _set_alloc_hint(cid
, oid
, expected_object_size
,
3226 expected_write_size
);
3227 tracepoint(objectstore
, setallochint_exit
, r
);
3232 derr
<< "bad op " << op
->op
<< dendl
;
3239 if (r
== -ENOENT
&& !(op
->op
== Transaction::OP_CLONERANGE
||
3240 op
->op
== Transaction::OP_CLONE
||
3241 op
->op
== Transaction::OP_CLONERANGE2
||
3242 op
->op
== Transaction::OP_COLL_ADD
||
3243 op
->op
== Transaction::OP_SETATTR
||
3244 op
->op
== Transaction::OP_SETATTRS
||
3245 op
->op
== Transaction::OP_RMATTR
||
3246 op
->op
== Transaction::OP_OMAP_SETKEYS
||
3247 op
->op
== Transaction::OP_OMAP_RMKEYS
||
3248 op
->op
== Transaction::OP_OMAP_RMKEYRANGE
||
3249 op
->op
== Transaction::OP_OMAP_SETHEADER
))
3250 // -ENOENT is normally okay
3251 // ...including on a replayed OP_RMCOLL with checkpoint mode
3256 if (op
->op
== Transaction::OP_SETALLOCHINT
)
3257 // Either EOPNOTSUPP or EINVAL most probably. EINVAL in most
3258 // cases means invalid hint size (e.g. too big, not a multiple
3259 // of block size, etc) or, at least on xfs, an attempt to set
3260 // or change it when the file is not empty. However,
3261 // OP_SETALLOCHINT is advisory, so ignore all errors.
3264 if (replaying
&& !backend
->can_checkpoint()) {
3265 if (r
== -EEXIST
&& op
->op
== Transaction::OP_MKCOLL
) {
3266 dout(10) << "tolerating EEXIST during journal replay since checkpoint is not enabled" << dendl
;
3269 if (r
== -EEXIST
&& op
->op
== Transaction::OP_COLL_ADD
) {
3270 dout(10) << "tolerating EEXIST during journal replay since checkpoint is not enabled" << dendl
;
3273 if (r
== -EEXIST
&& op
->op
== Transaction::OP_COLL_MOVE
) {
3274 dout(10) << "tolerating EEXIST during journal replay since checkpoint is not enabled" << dendl
;
3278 dout(10) << "tolerating ERANGE on replay" << dendl
;
3282 dout(10) << "tolerating ENOENT on replay" << dendl
;
3288 const char *msg
= "unexpected error code";
3290 if (r
== -ENOENT
&& (op
->op
== Transaction::OP_CLONERANGE
||
3291 op
->op
== Transaction::OP_CLONE
||
3292 op
->op
== Transaction::OP_CLONERANGE2
)) {
3293 msg
= "ENOENT on clone suggests osd bug";
3294 } else if (r
== -ENOSPC
) {
3295 // For now, if we hit _any_ ENOSPC, crash, before we do any damage
3296 // by partially applying transactions.
3297 msg
= "ENOSPC from disk filesystem, misconfigured cluster";
3298 } else if (r
== -ENOTEMPTY
) {
3299 msg
= "ENOTEMPTY suggests garbage data in osd data dir";
3300 } else if (r
== -EPERM
) {
3301 msg
= "EPERM suggests file(s) in osd data dir not owned by ceph user, or leveldb corruption";
3304 derr
<< " error " << cpp_strerror(r
) << " not handled on operation " << op
3305 << " (" << spos
<< ", or op " << spos
.op
<< ", counting from 0)" << dendl
;
3306 dout(0) << msg
<< dendl
;
3307 dout(0) << " transaction dump:\n";
3308 JSONFormatter
f(true);
3309 f
.open_object_section("transaction");
3319 ceph_abort_msg("unexpected error");
3329 /*********************************************/
3333 // --------------------
// Return true iff the object exists: wait until all in-flight ops for
// this oid have been applied by the sequencer, then probe via stat().
3336 bool FileStore::exists(CollectionHandle
& ch
, const ghobject_t
& oid
)
3338 tracepoint(objectstore
, exists_enter
, ch
->cid
.c_str());
// The CollectionHandle wraps an OpSequencer; block until pending
// writes to this oid are visible on disk before stat-ing.
3339 auto osr
= static_cast<OpSequencer
*>(ch
.get());
3340 osr
->wait_for_apply(oid
);
3342 bool retval
= stat(ch
, oid
, &st
) == 0;
3343 tracepoint(objectstore
, exists_exit
, retval
);
// stat() an object: wait for pending applies, resolve the (possibly
// temp) collection, and lfn_stat the object's backing file.  When
// allow_eio is false, an EIO from disk is asserted fatal (subject to
// m_filestore_fail_eio).  Debug error injection may override the
// result (filestore_debug_inject_read_err path below).
3347 int FileStore::stat(
3348 CollectionHandle
& ch
, const ghobject_t
& oid
, struct stat
*st
, bool allow_eio
)
3350 tracepoint(objectstore
, stat_enter
, ch
->cid
.c_str());
3351 auto osr
= static_cast<OpSequencer
*>(ch
.get());
3352 osr
->wait_for_apply(oid
);
// Objects that logically live in a temp collection are stored there;
// pick the temp variant of the cid when needed.
3353 const coll_t
& cid
= !_need_temp_object_collection(ch
->cid
, oid
) ? ch
->cid
: ch
->cid
.get_temp();
3354 int r
= lfn_stat(cid
, oid
, st
);
3355 ceph_assert(allow_eio
|| !m_filestore_fail_eio
|| r
!= -EIO
);
3357 dout(10) << __FUNC__
<< ": " << ch
->cid
<< "/" << oid
3358 << " = " << r
<< dendl
;
3360 dout(10) << __FUNC__
<< ": " << ch
->cid
<< "/" << oid
3362 << " (size " << st
->st_size
<< ")" << dendl
;
// Optional fault injection for metadata reads (testing only).
3364 if (cct
->_conf
->filestore_debug_inject_read_err
&&
3365 debug_mdata_eio(oid
)) {
3368 tracepoint(objectstore
, stat_exit
, r
);
// Set per-collection pool options.  NOTE(review): only the signature
// survived the extraction; the body (return value) is not visible
// here -- confirm against the full source.
3373 int FileStore::set_collection_opts(
3374 CollectionHandle
& ch
,
3375 const pool_opts_t
& opts
)
// Read [offset, offset+len) of an object into a bufferlist via
// safe_pread.  offset==0 && len==0 means "read the whole object"
// (length taken from fstat).  Honors fadvise op_flags around the
// read, optionally verifies the sloppy-CRC record, and supports two
// debug EIO-injection knobs.  NOTE(review): lossy extraction --
// return statements and several braces are missing from this chunk.
3380 int FileStore::read(
3381 CollectionHandle
& ch
,
3382 const ghobject_t
& oid
,
3389 tracepoint(objectstore
, read_enter
, ch
->cid
.c_str(), offset
, len
);
3390 const coll_t
& cid
= !_need_temp_object_collection(ch
->cid
, oid
) ? ch
->cid
: ch
->cid
.get_temp();
3392 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< dendl
;
// Wait for pending applies so the read sees committed data.
3394 auto osr
= static_cast<OpSequencer
*>(ch
.get());
3395 osr
->wait_for_apply(oid
);
3398 int r
= lfn_open(cid
, oid
, false, &fd
);
3400 dout(10) << __FUNC__
<< ": (" << cid
<< "/" << oid
<< ") open error: "
3401 << cpp_strerror(r
) << dendl
;
// Whole-object read: derive len from the file size via fstat.
3405 if (offset
== 0 && len
== 0) {
3407 memset(&st
, 0, sizeof(struct stat
));
3408 int r
= ::fstat(**fd
, &st
);
3409 ceph_assert(r
== 0);
// Pre-read fadvise hints from the OSD op flags.
3413 #ifdef HAVE_POSIX_FADVISE
3414 if (op_flags
& CEPH_OSD_OP_FLAG_FADVISE_RANDOM
)
3415 posix_fadvise(**fd
, offset
, len
, POSIX_FADV_RANDOM
);
3416 if (op_flags
& CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL
)
3417 posix_fadvise(**fd
, offset
, len
, POSIX_FADV_SEQUENTIAL
);
3420 bufferptr
bptr(len
); // prealloc space for entire read
3421 got
= safe_pread(**fd
, bptr
.c_str(), len
, offset
);
3423 dout(10) << __FUNC__
<< ": (" << cid
<< "/" << oid
<< ") pread error: " << cpp_strerror(got
) << dendl
;
3427 bptr
.set_length(got
); // properly size the buffer
3429 bl
.push_back(std::move(bptr
)); // put it in the target bufferlist
// Post-read fadvise: drop or reset cache hints.
3431 #ifdef HAVE_POSIX_FADVISE
3432 if (op_flags
& CEPH_OSD_OP_FLAG_FADVISE_DONTNEED
)
3433 posix_fadvise(**fd
, offset
, len
, POSIX_FADV_DONTNEED
);
3434 if (op_flags
& (CEPH_OSD_OP_FLAG_FADVISE_RANDOM
| CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL
))
3435 posix_fadvise(**fd
, offset
, len
, POSIX_FADV_NORMAL
);
// Optional sloppy-CRC verification of what was just read; any
// mismatch is fatal (data corruption).
3438 if (m_filestore_sloppy_crc
&& (!replaying
|| backend
->can_checkpoint())) {
3440 int errors
= backend
->_crc_verify_read(**fd
, offset
, got
, bl
, &ss
);
3442 dout(0) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~"
3443 << got
<< " ... BAD CRC:\n" << ss
.str() << dendl
;
3444 ceph_abort_msg("bad crc on read");
3450 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~"
3451 << got
<< "/" << len
<< dendl
;
// Debug EIO injection: per-oid targeted, or random for pool > 0.
3452 if (cct
->_conf
->filestore_debug_inject_read_err
&&
3453 debug_data_eio(oid
)) {
3455 } else if (oid
.hobj
.pool
> 0 && /* FIXME, see #23029 */
3456 cct
->_conf
->filestore_debug_random_read_err
&&
3457 (rand() % (int)(cct
->_conf
->filestore_debug_random_read_err
*
3459 dout(0) << __func__
<< ": inject random EIO" << dendl
;
3462 tracepoint(objectstore
, read_exit
, got
);
// Build an extent map (logical offset -> length) for [offset,
// offset+len) of fd using the backend's FIEMAP ioctl wrapper.
// Adjacent extents are merged, the first extent is clipped to start
// at `offset`, and the last is clipped to end at offset+len.
// NOTE(review): lossy extraction -- the loop increments, the free of
// the fiemap buffer, and the function's tail/return are not visible
// in this chunk.
3467 int FileStore::_do_fiemap(int fd
, uint64_t offset
, size_t len
,
3468 map
<uint64_t, uint64_t> *m
)
3471 struct fiemap_extent
*extent
= nullptr;
3472 struct fiemap
*fiemap
= nullptr;
// Backend fills a freshly-allocated fiemap struct for this range.
3476 r
= backend
->do_fiemap(fd
, offset
, len
, &fiemap
);
3480 if (fiemap
->fm_mapped_extents
== 0) {
3485 extent
= &fiemap
->fm_extents
[0];
3487 /* start where we were asked to start */
3488 if (extent
->fe_logical
< offset
) {
3489 extent
->fe_length
-= offset
- extent
->fe_logical
;
3490 extent
->fe_logical
= offset
;
3495 struct fiemap_extent
*last
= nullptr;
3496 while (i
< fiemap
->fm_mapped_extents
) {
3497 struct fiemap_extent
*next
= extent
+ 1;
3499 dout(10) << __FUNC__
<< ": fm_mapped_extents=" << fiemap
->fm_mapped_extents
3500 << " fe_logical=" << extent
->fe_logical
<< " fe_length=" << extent
->fe_length
<< dendl
;
3502 /* try to merge extents */
// Coalesce physically-consecutive logical extents into `next` so a
// single map entry covers the merged run.
3503 while ((i
< fiemap
->fm_mapped_extents
- 1) &&
3504 (extent
->fe_logical
+ extent
->fe_length
== next
->fe_logical
)) {
3505 next
->fe_length
+= extent
->fe_length
;
3506 next
->fe_logical
= extent
->fe_logical
;
// Clip the final extent so the map never extends past offset+len.
3512 if (extent
->fe_logical
+ extent
->fe_length
> offset
+ len
)
3513 extent
->fe_length
= offset
+ len
- extent
->fe_logical
;
3514 (*m
)[extent
->fe_logical
] = extent
->fe_length
;
// Advance the window past the last extent seen; is_last detects the
// FIEMAP_EXTENT_LAST flag (or an exhausted len) to stop iterating.
3518 uint64_t xoffset
= last
->fe_logical
+ last
->fe_length
- offset
;
3519 offset
= last
->fe_logical
+ last
->fe_length
;
3521 const bool is_last
= (last
->fe_flags
& FIEMAP_EXTENT_LAST
) || (len
== 0);
// Build an extent map for [offset, offset+len) using lseek
// SEEK_DATA/SEEK_HOLE (Linux only): repeatedly find the next data
// region, then the hole terminating it, and record data_pos -> length
// entries clipped to the requested range.  ENXIO from lseek means the
// position is past EOF and ends the scan.  NOTE(review): lossy
// extraction -- the errno handling around the first lseek and the
// loop advance are not fully visible here.
3530 int FileStore::_do_seek_hole_data(int fd
, uint64_t offset
, size_t len
,
3531 map
<uint64_t, uint64_t> *m
)
3533 #if defined(__linux__) && defined(SEEK_HOLE) && defined(SEEK_DATA)
3534 off_t hole_pos
, data_pos
;
3537 // If lseek fails with errno setting to be ENXIO, this means the current
3538 // file offset is beyond the end of the file.
3539 off_t start
= offset
;
3540 while(start
< (off_t
)(offset
+ len
)) {
3541 data_pos
= lseek(fd
, start
, SEEK_DATA
);
3547 dout(10) << "failed to lseek: " << cpp_strerror(r
) << dendl
;
// Next data region begins past the requested range: nothing more to
// record inside the window.
3550 } else if (data_pos
> (off_t
)(offset
+ len
)) {
3554 hole_pos
= lseek(fd
, data_pos
, SEEK_HOLE
);
3556 if (errno
== ENXIO
) {
3560 dout(10) << "failed to lseek: " << cpp_strerror(r
) << dendl
;
// Record the data run, clipped to the end of the requested range.
3565 if (hole_pos
>= (off_t
)(offset
+ len
)) {
3566 (*m
)[data_pos
] = offset
+ len
- data_pos
;
3569 (*m
)[data_pos
] = hole_pos
- data_pos
;
// bufferlist-returning fiemap overload: delegates to the map-based
// overload below and (in lines not visible here) encodes the
// resulting extent map into the output bufferlist.
3580 int FileStore::fiemap(CollectionHandle
& ch
, const ghobject_t
& oid
,
3581 uint64_t offset
, size_t len
,
3584 map
<uint64_t, uint64_t> exomap
;
3585 int r
= fiemap(ch
, oid
, offset
, len
, exomap
);
// Map-based fiemap: report which parts of [offset, offset+len) are
// allocated.  Small requests (<= filestore_fiemap_threshold) or
// backends with neither SEEK_DATA/SEEK_HOLE nor FIEMAP support get a
// single whole-range extent; otherwise the appropriate probing helper
// is used.  NOTE(review): lossy extraction -- return statements and
// some braces are not visible in this chunk.
3592 int FileStore::fiemap(CollectionHandle
& ch
, const ghobject_t
& oid
,
3593 uint64_t offset
, size_t len
,
3594 map
<uint64_t, uint64_t>& destmap
)
3596 tracepoint(objectstore
, fiemap_enter
, ch
->cid
.c_str(), offset
, len
);
3597 const coll_t
& cid
= !_need_temp_object_collection(ch
->cid
, oid
) ? ch
->cid
: ch
->cid
.get_temp();
// Cheap path: pretend the whole range is allocated.
3600 if ((!backend
->has_seek_data_hole() && !backend
->has_fiemap()) ||
3601 len
<= (size_t)m_filestore_fiemap_threshold
) {
3602 destmap
[offset
] = len
;
3606 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< dendl
;
3608 auto osr
= static_cast<OpSequencer
*>(ch
.get());
3609 osr
->wait_for_apply(oid
);
3613 int r
= lfn_open(cid
, oid
, false, &fd
);
3615 dout(10) << "read couldn't open " << cid
<< "/" << oid
<< ": " << cpp_strerror(r
) << dendl
;
// Prefer SEEK_DATA/SEEK_HOLE when the backend supports it; fall back
// to the FIEMAP ioctl otherwise.
3619 if (backend
->has_seek_data_hole()) {
3620 dout(15) << "seek_data/seek_hole " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< dendl
;
3621 r
= _do_seek_hole_data(**fd
, offset
, len
, &destmap
);
3622 } else if (backend
->has_fiemap()) {
3623 dout(15) << "fiemap ioctl" << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< dendl
;
3624 r
= _do_fiemap(**fd
, offset
, len
, &destmap
);
3631 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< " = " << r
<< " num_extents=" << destmap
.size() << " " << destmap
<< dendl
;
3632 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
3633 tracepoint(objectstore
, fiemap_exit
, r
);
// Remove an object: delegates to lfn_unlink, passing the sequencer
// position so the unlink can participate in replay-guard accounting.
3637 int FileStore::_remove(const coll_t
& cid
, const ghobject_t
& oid
,
3638 const SequencerPosition
&spos
)
3640 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< dendl
;
3641 int r
= lfn_unlink(cid
, oid
, spos
);
3642 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " = " << r
<< dendl
;
// Truncate an object's backing file to `size` via lfn_truncate;
// returns its result (logged at dout(10)).
3646 int FileStore::_truncate(const coll_t
& cid
, const ghobject_t
& oid
, uint64_t size
)
3648 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " size " << size
<< dendl
;
3649 int r
= lfn_truncate(cid
, oid
, size
);
3650 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " size " << size
<< " = " << r
<< dendl
;
// Ensure an object exists: lfn_open with create=true makes (or opens)
// the backing file; the fd is only needed for its side effect.
3655 int FileStore::_touch(const coll_t
& cid
, const ghobject_t
& oid
)
3657 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< dendl
;
3660 int r
= lfn_open(cid
, oid
, true, &fd
);
3666 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " = " << r
<< dendl
;
// Write `bl` at [offset, offset+len) of an object: open/create the
// backing file, write via bufferlist::write_fd, update the optional
// sloppy-CRC record, then either fadvise-drop the cache (during
// replay / when the wb throttle is disabled) or enqueue the dirty fd
// with the writeback throttle.  NOTE(review): lossy extraction --
// error-path braces and the final return are not visible here.
3670 int FileStore::_write(const coll_t
& cid
, const ghobject_t
& oid
,
3671 uint64_t offset
, size_t len
,
3672 const bufferlist
& bl
, uint32_t fadvise_flags
)
3674 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< dendl
;
3678 r
= lfn_open(cid
, oid
, true, &fd
);
3680 dout(0) << __FUNC__
<< ": couldn't open " << cid
<< "/"
3682 << cpp_strerror(r
) << dendl
;
// bufferlist knows how to write itself at an offset on an fd.
3687 r
= bl
.write_fd(**fd
, offset
);
3689 derr
<< __FUNC__
<< ": write_fd on " << cid
<< "/" << oid
3690 << " error: " << cpp_strerror(r
) << dendl
;
// Keep the sloppy-CRC shadow record in sync with the written bytes.
3696 if (r
>= 0 && m_filestore_sloppy_crc
) {
3697 int rc
= backend
->_crc_update_write(**fd
, offset
, len
, bl
);
3698 ceph_assert(rc
>= 0);
// During journal replay (or with the throttle disabled) there is no
// wbthrottle bookkeeping; honor DONTNEED directly via fadvise.
3701 if (replaying
|| m_disable_wbthrottle
) {
3702 if (fadvise_flags
& CEPH_OSD_OP_FLAG_FADVISE_DONTNEED
) {
3703 #ifdef HAVE_POSIX_FADVISE
3704 posix_fadvise(**fd
, 0, 0, POSIX_FADV_DONTNEED
);
3708 wbthrottle
.queue_wb(fd
, oid
, offset
, len
,
3709 fadvise_flags
& CEPH_OSD_OP_FLAG_FADVISE_DONTNEED
);
3715 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< " = " << r
<< dendl
;
// Zero [offset, offset+len) of an object.  Preferred path (when
// filestore_punch_hole is set and the platform supports
// FALLOC_FL_PUNCH_HOLE): punch a hole with fallocate, then ftruncate
// up if the range extends past EOF.  On EOPNOTSUPP (old kernel) fall
// back to writing literal zeros through _write.  NOTE(review): lossy
// extraction -- several braces, the `out:` label, and the return are
// not visible in this chunk.
3719 int FileStore::_zero(const coll_t
& cid
, const ghobject_t
& oid
, uint64_t offset
, size_t len
)
3721 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< dendl
;
3724 if (cct
->_conf
->filestore_punch_hole
) {
3725 #ifdef CEPH_HAVE_FALLOCATE
3726 # if !defined(__APPLE__) && !defined(__FreeBSD__)
3727 # ifdef FALLOC_FL_KEEP_SIZE
3728 // first try to punch a hole.
3730 ret
= lfn_open(cid
, oid
, false, &fd
);
// fstat first: st.st_size is needed below to decide on extension.
3736 ret
= ::fstat(**fd
, &st
);
3743 // first try fallocate
3744 ret
= fallocate(**fd
, FALLOC_FL_KEEP_SIZE
| FALLOC_FL_PUNCH_HOLE
,
3749 // ensure we extend file size, if needed
3750 if (len
> 0 && offset
+ len
> (uint64_t)st
.st_size
) {
3751 ret
= ::ftruncate(**fd
, offset
+ len
);
3761 if (ret
>= 0 && m_filestore_sloppy_crc
) {
3762 int rc
= backend
->_crc_update_zero(**fd
, offset
, len
);
3763 ceph_assert(rc
>= 0);
3768 if (ret
!= -EOPNOTSUPP
)
3769 goto out
; // some other error
3775 // lame, kernel is old and doesn't support it.
3776 // write zeros.. yuck!
3777 dout(20) << __FUNC__
<< ": falling back to writing zeros" << dendl
;
3780 bl
.append_zero(len
);
3781 ret
= _write(cid
, oid
, offset
, len
, bl
);
3784 #ifdef CEPH_HAVE_FALLOCATE
3785 # if !defined(__APPLE__) && !defined(__FreeBSD__)
3786 # ifdef FALLOC_FL_KEEP_SIZE
3791 dout(20) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " " << offset
<< "~" << len
<< " = " << ret
<< dendl
;
// Clone oldoid -> newoid within one collection: replay-guard check,
// open source / create+truncate destination (under the index access
// lock), copy data via _do_clone_range, clone the object_map, copy
// all xattrs (including the spill-out marker), and finally stamp a
// replay guard on the new object since clone is non-idempotent.
// NOTE(review): lossy extraction -- error-path braces, `goto out`
// targets, and the return are not visible in this chunk.
3795 int FileStore::_clone(const coll_t
& cid
, const ghobject_t
& oldoid
, const ghobject_t
& newoid
,
3796 const SequencerPosition
& spos
)
3798 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oldoid
<< " -> " << cid
<< "/" << newoid
<< dendl
;
// A negative guard check means this spos was already applied: skip.
3800 if (_check_replay_guard(cid
, newoid
, spos
) < 0)
3807 r
= lfn_open(cid
, oldoid
, false, &o
, &index
);
3811 ceph_assert(index
.index
);
// Hold the collection index lock across create+truncate of the
// destination so the pair is atomic w.r.t. other index users.
3812 std::unique_lock l
{(index
.index
)->access_lock
};
3814 r
= lfn_open(cid
, newoid
, true, &n
, &index
);
3818 r
= ::ftruncate(**n
, 0);
3824 r
= ::fstat(**o
, &st
);
// Copy the full source file [0, st_size) into the destination.
3830 r
= _do_clone_range(**o
, **n
, 0, st
.st_size
, 0);
3835 dout(20) << "objectmap clone" << dendl
;
// ENOENT here just means the source had no omap data.
3836 r
= object_map
->clone(oldoid
, newoid
, &spos
);
3837 if (r
< 0 && r
!= -ENOENT
)
// Copy xattrs; the spill-out marker records whether xattrs overflow
// into the object map, so it must be replicated explicitly.
3843 map
<string
, bufferptr
> aset
;
3844 r
= _fgetattrs(**o
, aset
);
3848 r
= chain_fgetxattr(**o
, XATTR_SPILL_OUT_NAME
, buf
, sizeof(buf
));
3849 if (r
>= 0 && !strncmp(buf
, XATTR_NO_SPILL_OUT
, sizeof(XATTR_NO_SPILL_OUT
))) {
3850 r
= chain_fsetxattr
<true, true>(**n
, XATTR_SPILL_OUT_NAME
, XATTR_NO_SPILL_OUT
,
3851 sizeof(XATTR_NO_SPILL_OUT
));
3853 r
= chain_fsetxattr
<true, true>(**n
, XATTR_SPILL_OUT_NAME
, XATTR_SPILL_OUT
,
3854 sizeof(XATTR_SPILL_OUT
));
3859 r
= _fsetattrs(**n
, aset
);
3864 // clone is non-idempotent; record our work.
3865 _set_replay_guard(**n
, spos
, &newoid
);
3872 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oldoid
<< " -> " << cid
<< "/" << newoid
<< " = " << r
<< dendl
;
3873 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
// _do_clone_range: copy [srcoff, srcoff+len) from fd 'from' to offset
// 'dstoff' of fd 'to'.  Pure delegation to the filesystem backend, which may
// use an efficient clone primitive (e.g. btrfs clone-range) or fall back.
3877 int FileStore::_do_clone_range(int from
, int to
, uint64_t srcoff
, uint64_t len
, uint64_t dstoff
)
3879 dout(20) << __FUNC__
<< ": copy " << srcoff
<< "~" << len
<< " to " << dstoff
<< dendl
;
3880 return backend
->clone_range(from
, to
, srcoff
, len
, dstoff
);
// _do_sparse_copy_range: sparseness-aware copy of [srcoff, srcoff+len) from
// fd 'from' to dstoff of fd 'to'.  First builds an extent map (exomap) of the
// allocated ranges using SEEK_DATA/SEEK_HOLE when the backend supports it,
// else the FIEMAP ioctl; then copies only those extents via _do_copy_range,
// shifting each extent offset by (dstoff - srcoff).  Afterwards it updates
// sloppy-CRC state (over the whole logical range) and extends the destination
// with ftruncate if the copied region ends past its current size, so holes at
// the tail are preserved as file length.
// NOTE(review): lossy extraction — the early-return/error paths between
// fragments are missing from this view.
3883 int FileStore::_do_sparse_copy_range(int from
, int to
, uint64_t srcoff
, uint64_t len
, uint64_t dstoff
)
3885 dout(20) << __FUNC__
<< ": " << srcoff
<< "~" << len
<< " to " << dstoff
<< dendl
;
3887 map
<uint64_t, uint64_t> exomap
;
3888 // fiemap doesn't allow zero length
3892 if (backend
->has_seek_data_hole()) {
3893 dout(15) << "seek_data/seek_hole " << from
<< " " << srcoff
<< "~" << len
<< dendl
;
3894 r
= _do_seek_hole_data(from
, srcoff
, len
, &exomap
);
3895 } else if (backend
->has_fiemap()) {
3896 dout(15) << "fiemap ioctl" << from
<< " " << srcoff
<< "~" << len
<< dendl
;
3897 r
= _do_fiemap(from
, srcoff
, len
, &exomap
);
3901 int64_t written
= 0;
3905 for (map
<uint64_t, uint64_t>::iterator miter
= exomap
.begin(); miter
!= exomap
.end(); ++miter
) {
3906 uint64_t it_off
= miter
->first
- srcoff
+ dstoff
;
3907 r
= _do_copy_range(from
, to
, miter
->first
, miter
->second
, it_off
, true);
3909 derr
<< __FUNC__
<< ": copy error at " << miter
->first
<< "~" << miter
->second
3910 << " to " << it_off
<< ", " << cpp_strerror(r
) << dendl
;
3913 written
+= miter
->second
;
3917 if (m_filestore_sloppy_crc
) {
3918 int rc
= backend
->_crc_update_clone_range(from
, to
, srcoff
, len
, dstoff
);
3919 ceph_assert(rc
>= 0);
3922 r
= ::fstat(to
, &st
);
3925 derr
<< __FUNC__
<< ": fstat error at " << to
<< " " << cpp_strerror(r
) << dendl
;
3928 if (st
.st_size
< (int)(dstoff
+ len
)) {
3929 r
= ::ftruncate(to
, dstoff
+ len
);
3932 derr
<< __FUNC__
<< ": ftruncate error at " << dstoff
+len
<< " " << cpp_strerror(r
) << dendl
;
3940 dout(20) << __FUNC__
<< ": " << srcoff
<< "~" << len
<< " to " << dstoff
<< " = " << r
<< dendl
;
// _do_copy_range: byte-for-byte copy of [srcoff, srcoff+len) from fd 'from'
// to dstoff of fd 'to'.  Two paths:
//  1. splice(2) via a pipe when the backend supports it (zero-copy through
//     the kernel), 64 KiB (4096*16) at a time — limited by pipe max size;
//  2. portable lseek64 + read/write loop with the same chunk size, retrying
//     on EINTR and handling short writes (the 'op' partial-write offset).
// A short read is tolerated only during journal replay (replaying), where a
// short source is expected and asserted to be -ERANGE; otherwise the loop is
// expected to reach 'end'.  Sloppy-CRC is updated afterwards unless the
// caller passed skip_sloppycrc (e.g. _do_sparse_copy_range updates it once
// for the whole range instead of per-extent).
// NOTE(review): lossy extraction — loop headers, some branch conditions and
// returns between fragments are missing from this view.
3944 int FileStore::_do_copy_range(int from
, int to
, uint64_t srcoff
, uint64_t len
, uint64_t dstoff
, bool skip_sloppycrc
)
3946 dout(20) << __FUNC__
<< ": " << srcoff
<< "~" << len
<< " to " << dstoff
<< dendl
;
3948 loff_t pos
= srcoff
;
3949 loff_t end
= srcoff
+ len
;
3950 int buflen
= 4096 * 16; //limit by pipe max size.see fcntl
3952 #ifdef CEPH_HAVE_SPLICE
3953 if (backend
->has_splice()) {
3955 if (pipe_cloexec(pipefd
, 0) < 0) {
3957 derr
<< " pipe " << " got " << cpp_strerror(e
) << dendl
;
3961 loff_t dstpos
= dstoff
;
3963 int l
= std::min
<int>(end
-pos
, buflen
);
3964 r
= safe_splice(from
, &pos
, pipefd
[1], nullptr, l
, SPLICE_F_NONBLOCK
);
3965 dout(10) << " safe_splice read from " << pos
<< "~" << l
<< " got " << r
<< dendl
;
3967 derr
<< __FUNC__
<< ": safe_splice read error at " << pos
<< "~" << len
3968 << ", " << cpp_strerror(r
) << dendl
;
3972 // hrm, bad source range, wtf.
3974 derr
<< __FUNC__
<< ": got short read result at " << pos
3975 << " of fd " << from
<< " len " << len
<< dendl
;
3979 r
= safe_splice(pipefd
[0], nullptr, to
, &dstpos
, r
, 0);
3980 dout(10) << " safe_splice write to " << to
<< " len " << r
3981 << " got " << r
<< dendl
;
3983 derr
<< __FUNC__
<< ": write error at " << pos
<< "~"
3984 << r
<< ", " << cpp_strerror(r
) << dendl
;
3995 actual
= ::lseek64(from
, srcoff
, SEEK_SET
);
3996 if (actual
!= (int64_t)srcoff
) {
4001 derr
<< "lseek64 to " << srcoff
<< " got " << cpp_strerror(r
) << dendl
;
4004 actual
= ::lseek64(to
, dstoff
, SEEK_SET
);
4005 if (actual
!= (int64_t)dstoff
) {
4010 derr
<< "lseek64 to " << dstoff
<< " got " << cpp_strerror(r
) << dendl
;
4016 int l
= std::min
<int>(end
-pos
, buflen
);
4017 r
= ::read(from
, buf
, l
);
4018 dout(25) << " read from " << pos
<< "~" << l
<< " got " << r
<< dendl
;
4020 if (errno
== EINTR
) {
4024 derr
<< __FUNC__
<< ": read error at " << pos
<< "~" << len
4025 << ", " << cpp_strerror(r
) << dendl
;
4030 // hrm, bad source range, wtf.
4032 derr
<< __FUNC__
<< ": got short read result at " << pos
4033 << " of fd " << from
<< " len " << len
<< dendl
;
4038 int r2
= safe_write(to
, buf
+op
, r
-op
);
4039 dout(25) << " write to " << to
<< " len " << (r
-op
)
4040 << " got " << r2
<< dendl
;
4043 derr
<< __FUNC__
<< ": write error at " << pos
<< "~"
4044 << r
-op
<< ", " << cpp_strerror(r
) << dendl
;
4056 if (r
< 0 && replaying
) {
4057 ceph_assert(r
== -ERANGE
);
4058 derr
<< __FUNC__
<< ": short source tolerated because we are replaying" << dendl
;
4061 ceph_assert(replaying
|| pos
== end
);
4062 if (r
>= 0 && !skip_sloppycrc
&& m_filestore_sloppy_crc
) {
4063 int rc
= backend
->_crc_update_clone_range(from
, to
, srcoff
, len
, dstoff
);
4064 ceph_assert(rc
>= 0);
4066 dout(20) << __FUNC__
<< ": " << srcoff
<< "~" << len
<< " to " << dstoff
<< " = " << r
<< dendl
;
// _clone_range: clone a sub-range [srcoff, srcoff+len) of oldcid/oldoid into
// newcid/newoid at dstoff.  Honors the replay guard on the destination, opens
// source read-only and destination with create, delegates the data movement
// to _do_clone_range, and records a replay guard on the destination since the
// operation is non-idempotent.
// NOTE(review): lossy extraction — error-return branches between fragments
// are missing from this view.
4070 int FileStore::_clone_range(const coll_t
& oldcid
, const ghobject_t
& oldoid
, const coll_t
& newcid
, const ghobject_t
& newoid
,
4071 uint64_t srcoff
, uint64_t len
, uint64_t dstoff
,
4072 const SequencerPosition
& spos
)
4074 dout(15) << __FUNC__
<< ": " << oldcid
<< "/" << oldoid
<< " -> " << newcid
<< "/" << newoid
<< " " << srcoff
<< "~" << len
<< " to " << dstoff
<< dendl
;
4076 if (_check_replay_guard(newcid
, newoid
, spos
) < 0)
4081 r
= lfn_open(oldcid
, oldoid
, false, &o
);
4085 r
= lfn_open(newcid
, newoid
, true, &n
);
4089 r
= _do_clone_range(**o
, **n
, srcoff
, len
, dstoff
);
4094 // clone is non-idempotent; record our work.
4095 _set_replay_guard(**n
, spos
, &newoid
);
4102 dout(10) << __FUNC__
<< ": " << oldcid
<< "/" << oldoid
<< " -> " << newcid
<< "/" << newoid
<< " "
4103 << srcoff
<< "~" << len
<< " to " << dstoff
<< " = " << r
<< dendl
;
// SyncEntryTimeout: watchdog Context armed by sync_entry() before a commit.
// If the commit does not finish within m_commit_timeo seconds the timer fires
// finish(), which dumps a backtrace and logs the timeout (the remainder of
// finish() is outside this view — presumably it aborts; TODO confirm).
4107 class SyncEntryTimeout
: public Context
{
4110 explicit SyncEntryTimeout(CephContext
* cct
, int commit_timeo
)
4111 : cct(cct
), m_commit_timeo(commit_timeo
)
4115 void finish(int r
) override
{
4116 BackTrace
*bt
= new BackTrace(1);
4117 generic_dout(-1) << "FileStore: sync_entry timed out after "
4118 << m_commit_timeo
<< " seconds.\n";
// sync_entry: body of the sync thread.  Loop (loop structure partially
// outside this view): wait up to filestore_max_sync_interval for work or a
// forced sync; enforce filestore_min_sync_interval between commits; swap out
// the current sync_waiters into 'fin'; then, if apply_manager.commit_start()
// says there is anything to commit:
//  - arm a SyncEntryTimeout watchdog for filestore_commit_timeout seconds;
//  - optionally run the filestore_debug_omap_check consistency check;
//  - checkpointing backends (e.g. btrfs): persist the op_seq, create a
//    COMMIT_SNAP_ITEM snapshot, wait for it to reach disk, and keep only the
//    two most recent snaps;
//  - non-checkpointing backends: sync the object_map, syncfs() the whole fs,
//    then write and fsync the op_seq file;
//  - record commit latency / pause / cycle perf counters, finish the commit,
//    cancel the watchdog, and complete the waiter Contexts in 'fin'.
// Loops again immediately when more waiters queued up or the journal reports
// it should commit again (probably is/was full).
// NOTE(review): lossy extraction — loop/branch scaffolding between fragments
// is missing from this view.
4129 void FileStore::sync_entry()
4131 std::unique_lock l
{lock
};
4133 auto min_interval
= ceph::make_timespan(m_filestore_min_sync_interval
);
4134 auto max_interval
= ceph::make_timespan(m_filestore_max_sync_interval
);
4135 auto startwait
= ceph::real_clock::now();
4137 dout(20) << __FUNC__
<< ": waiting for max_interval " << max_interval
<< dendl
;
4138 sync_cond
.wait_for(l
, max_interval
);
4140 dout(20) << __FUNC__
<< ": not waiting, force_sync set" << dendl
;
4144 dout(20) << __FUNC__
<< ": force_sync set" << dendl
;
4147 dout(20) << __FUNC__
<< ": stop set" << dendl
;
4150 // wait for at least the min interval
4151 auto woke
= ceph::real_clock::now() - startwait
;
4152 dout(20) << __FUNC__
<< ": woke after " << woke
<< dendl
;
4153 if (woke
< min_interval
) {
4154 auto t
= min_interval
- woke
;
4155 dout(20) << __FUNC__
<< ": waiting for another " << t
4156 << " to reach min interval " << min_interval
<< dendl
;
4157 sync_cond
.wait_for(l
, t
);
4163 fin
.swap(sync_waiters
);
4167 if (apply_manager
.commit_start()) {
4168 auto start
= ceph::real_clock::now();
4169 uint64_t cp
= apply_manager
.get_committing_seq();
4171 sync_entry_timeo_lock
.lock();
4172 SyncEntryTimeout
*sync_entry_timeo
=
4173 new SyncEntryTimeout(cct
, m_filestore_commit_timeout
);
4174 if (!timer
.add_event_after(m_filestore_commit_timeout
,
4175 sync_entry_timeo
)) {
4176 sync_entry_timeo
= nullptr;
4178 sync_entry_timeo_lock
.unlock();
4180 logger
->set(l_filestore_committing
, 1);
4182 dout(15) << __FUNC__
<< ": committing " << cp
<< dendl
;
4183 stringstream errstream
;
4184 if (cct
->_conf
->filestore_debug_omap_check
&& !object_map
->check(errstream
)) {
4185 derr
<< errstream
.str() << dendl
;
4189 if (backend
->can_checkpoint()) {
4190 int err
= write_op_seq(op_fd
, cp
);
4192 derr
<< "Error during write_op_seq: " << cpp_strerror(err
) << dendl
;
4193 ceph_abort_msg("error during write_op_seq");
4197 snprintf(s
, sizeof(s
), COMMIT_SNAP_ITEM
, (long long unsigned)cp
);
4199 err
= backend
->create_checkpoint(s
, &cid
);
4202 derr
<< "snap create '" << s
<< "' got error " << err
<< dendl
;
4203 ceph_assert(err
== 0);
4206 snaps
.push_back(cp
);
4207 apply_manager
.commit_started();
4211 dout(20) << " waiting for checkpoint " << cid
<< " to complete" << dendl
;
4212 err
= backend
->sync_checkpoint(cid
);
4214 derr
<< "ioctl WAIT_SYNC got " << cpp_strerror(err
) << dendl
;
4215 ceph_abort_msg("wait_sync got error");
4217 dout(20) << " done waiting for checkpoint " << cid
<< " to complete" << dendl
;
4220 apply_manager
.commit_started();
4223 int err
= object_map
->sync();
4225 derr
<< "object_map sync got " << cpp_strerror(err
) << dendl
;
4226 ceph_abort_msg("object_map sync returned error");
4229 err
= backend
->syncfs();
4231 derr
<< "syncfs got " << cpp_strerror(err
) << dendl
;
4232 ceph_abort_msg("syncfs returned error");
4235 err
= write_op_seq(op_fd
, cp
);
4237 derr
<< "Error during write_op_seq: " << cpp_strerror(err
) << dendl
;
4238 ceph_abort_msg("error during write_op_seq");
4240 err
= ::fsync(op_fd
);
4242 derr
<< "Error during fsync of op_seq: " << cpp_strerror(err
) << dendl
;
4243 ceph_abort_msg("error during fsync of op_seq");
4247 auto done
= ceph::real_clock::now();
4248 auto lat
= done
- start
;
4249 auto dur
= done
- startwait
;
4250 dout(10) << __FUNC__
<< ": commit took " << lat
<< ", interval was " << dur
<< dendl
;
4251 utime_t max_pause_lat
= logger
->tget(l_filestore_sync_pause_max_lat
);
4252 if (max_pause_lat
< utime_t
{dur
- lat
}) {
4253 logger
->tinc(l_filestore_sync_pause_max_lat
, dur
- lat
);
4256 logger
->inc(l_filestore_commitcycle
);
4257 logger
->tinc(l_filestore_commitcycle_latency
, lat
);
4258 logger
->tinc(l_filestore_commitcycle_interval
, dur
);
4260 apply_manager
.commit_finish();
4261 if (!m_disable_wbthrottle
) {
4265 logger
->set(l_filestore_committing
, 0);
4267 // remove old snaps?
4268 if (backend
->can_checkpoint()) {
4270 while (snaps
.size() > 2) {
4271 snprintf(s
, sizeof(s
), COMMIT_SNAP_ITEM
, (long long unsigned)snaps
.front());
4273 dout(10) << "removing snap '" << s
<< "'" << dendl
;
4274 int r
= backend
->destroy_checkpoint(s
);
4277 derr
<< "unable to destroy snap '" << s
<< "' got " << cpp_strerror(err
) << dendl
;
4282 dout(15) << __FUNC__
<< ": committed to op_seq " << cp
<< dendl
;
4284 if (sync_entry_timeo
) {
4285 std::lock_guard lock
{sync_entry_timeo_lock
};
4286 timer
.cancel_event(sync_entry_timeo
);
4293 finish_contexts(cct
, fin
, 0);
4295 if (!sync_waiters
.empty()) {
4296 dout(10) << __FUNC__
<< ": more waiters, committing again" << dendl
;
4299 if (!stop
&& journal
&& journal
->should_commit_now()) {
4300 dout(10) << __FUNC__
<< ": journal says we should commit again (probably is/was full)" << dendl
;
// do_force_sync: request an immediate commit.  Takes the sync lock and wakes
// the sync thread via sync_cond (the force_sync flag set between these
// fragments is not visible in this view).
4307 void FileStore::do_force_sync()
4309 dout(10) << __FUNC__
<< dendl
;
4310 std::lock_guard l
{lock
};
4312 sync_cond
.notify_all();
// start_sync: queue 'onsafe' to be completed once the next commit reaches
// disk, and wake the sync thread.  Ownership of the Context passes to
// sync_waiters; sync_entry() completes it via finish_contexts.
4315 void FileStore::start_sync(Context
*onsafe
)
4317 std::lock_guard l
{lock
};
4318 sync_waiters
.push_back(onsafe
);
4319 sync_cond
.notify_all();
4321 dout(10) << __FUNC__
<< dendl
;
// sync: synchronous commit.  Registers a C_SafeCond waiter (which sets 'done'
// and signals the local condvar when the commit completes) via the start_sync
// path, then blocks on the condvar until done.  'done' itself is declared in
// a fragment missing from this view.
4324 void FileStore::sync()
4326 ceph::mutex m
= ceph::make_mutex("FileStore::sync");
4327 ceph::condition_variable c
;
4329 C_SafeCond
*fin
= new C_SafeCond(m
, c
, &done
);
4333 std::unique_lock l
{m
};
4334 c
.wait(l
, [&done
, this] {
4336 dout(10) << "sync waiting" << dendl
;
4340 dout(10) << "sync done" << dendl
;
// _flush_op_queue: drain all in-flight apply work — the op thread pool
// (drain call is in a fragment missing here) and then every apply finisher,
// so that all queued writes have been applied when this returns.
4343 void FileStore::_flush_op_queue()
4345 dout(10) << __FUNC__
<< ": draining op tp" << dendl
;
4347 dout(10) << __FUNC__
<< ": waiting for apply finisher" << dendl
;
4348 for (vector
<Finisher
*>::iterator it
= apply_finishers
.begin(); it
!= apply_finishers
.end(); ++it
) {
4349 (*it
)->wait_for_empty();
4354 * flush - make every queued write readable
// flush: make every queued write readable (applied), without forcing it to
// disk.  If filestore_blackhole is set, block forever on a never-satisfied
// condvar (writes are being deliberately dropped, so they can never become
// readable).  With writeahead journaling the journal is flushed first
// (fragment missing here); then the ondisk finishers are drained.
4356 void FileStore::flush()
4358 dout(10) << __FUNC__
<< dendl
;
4360 if (cct
->_conf
->filestore_blackhole
) {
4362 ceph::mutex lock
= ceph::make_mutex("FileStore::flush::lock");
4363 ceph::condition_variable cond
;
4364 std::unique_lock l
{lock
};
4365 cond
.wait(l
, [] {return false;} );
4369 if (m_filestore_journal_writeahead
) {
4372 dout(10) << __FUNC__
<< ": draining ondisk finisher" << dendl
;
4373 for (vector
<Finisher
*>::iterator it
= ondisk_finishers
.begin(); it
!= ondisk_finishers
.end(); ++it
) {
4374 (*it
)->wait_for_empty();
4379 dout(10) << __FUNC__
<< ": complete" << dendl
;
4383 * sync_and_flush - make every queued write readable AND committed to disk
// sync_and_flush: make every queued write readable AND committed to disk.
// The ordering differs by journal mode (writeahead vs parallel); the actual
// flush/sync calls sit in fragments missing from this view.
4385 void FileStore::sync_and_flush()
4387 dout(10) << __FUNC__
<< dendl
;
4389 if (m_filestore_journal_writeahead
) {
4394 // includes m_filestore_journal_parallel
4398 dout(10) << __FUNC__
<< ": done" << dendl
;
// flush_journal: flush the journal to disk; body beyond the debug line is in
// fragments missing from this view.
4401 int FileStore::flush_journal()
4403 dout(10) << __FUNC__
<< dendl
;
// snapshot: create a named cluster snapshot.  Only supported on backends
// that can checkpoint (e.g. btrfs); otherwise logs and fails.  The snapshot
// directory name is formatted from CLUSTER_SNAP_ITEM and the given name.
4409 int FileStore::snapshot(const string
& name
)
4411 dout(10) << __FUNC__
<< ": " << name
<< dendl
;
4414 if (!backend
->can_checkpoint()) {
4415 dout(0) << __FUNC__
<< ": " << name
<< " failed, not supported" << dendl
;
4420 snprintf(s
, sizeof(s
), CLUSTER_SNAP_ITEM
, name
.c_str());
4422 int r
= backend
->create_checkpoint(s
, nullptr);
4424 derr
<< __FUNC__
<< ": " << name
<< " failed: " << cpp_strerror(r
) << dendl
;
4430 // -------------------------------
// _fgetattr: read one chained xattr from fd into bufferptr bp.  Fast path
// reads into a stack buffer of CHAIN_XATTR_MAX_BLOCK_LEN; on -ERANGE it asks
// chain_fgetxattr for the size first (len query with null buffer) and
// re-reads into an exactly-sized buffer.  EIO is asserted away unless
// m_filestore_fail_eio injection is enabled.
4433 int FileStore::_fgetattr(int fd
, const char *name
, bufferptr
& bp
)
4435 char val
[CHAIN_XATTR_MAX_BLOCK_LEN
];
4436 int l
= chain_fgetxattr(fd
, name
, val
, sizeof(val
));
4438 bp
= buffer::create(l
);
4439 memcpy(bp
.c_str(), val
, l
);
4440 } else if (l
== -ERANGE
) {
4441 l
= chain_fgetxattr(fd
, name
, 0, 0);
4443 bp
= buffer::create(l
);
4444 l
= chain_fgetxattr(fd
, name
, bp
.c_str(), l
);
4447 ceph_assert(!m_filestore_fail_eio
|| l
!= -EIO
);
// _fgetattrs: enumerate all user xattrs on fd into aset.  Lists names with
// chain_flistxattr into a stack buffer, retrying via a heap buffer (names2)
// sized by a length query when the list overflows (-ERANGE).  Then walks the
// NUL-separated name list; names accepted by parse_attrname are fetched with
// _fgetattr and stored under the stripped attribute name.
// NOTE(review): lossy extraction — buffer declarations and cleanup between
// fragments are missing from this view.
4451 int FileStore::_fgetattrs(int fd
, map
<string
,bufferptr
>& aset
)
4455 int len
= chain_flistxattr(fd
, names1
, sizeof(names1
)-1);
4458 if (len
== -ERANGE
) {
4459 len
= chain_flistxattr(fd
, 0, 0);
4461 ceph_assert(!m_filestore_fail_eio
|| len
!= -EIO
);
4464 dout(10) << " -ERANGE, len is " << len
<< dendl
;
4465 names2
= new char[len
+1];
4466 len
= chain_flistxattr(fd
, names2
, len
);
4467 dout(10) << " -ERANGE, got " << len
<< dendl
;
4469 ceph_assert(!m_filestore_fail_eio
|| len
!= -EIO
);
4474 } else if (len
< 0) {
4475 ceph_assert(!m_filestore_fail_eio
|| len
!= -EIO
);
4482 char *end
= name
+ len
;
4483 while (name
< end
) {
4484 char *attrname
= name
;
4485 if (parse_attrname(&name
)) {
4487 dout(20) << __FUNC__
<< ": " << fd
<< " getting '" << name
<< "'" << dendl
;
4488 int r
= _fgetattr(fd
, attrname
, aset
[name
]);
4495 name
+= strlen(name
) + 1;
// _fsetattrs: write every attr in aset onto fd as a chained xattr.  Each key
// is mangled through get_attrname into the on-disk name; zero-length values
// pass a null/empty pointer ('val' set only when length() > 0).
4502 int FileStore::_fsetattrs(int fd
, map
<string
, bufferptr
> &aset
)
4504 for (map
<string
, bufferptr
>::iterator p
= aset
.begin();
4507 char n
[CHAIN_XATTR_MAX_NAME_LEN
];
4508 get_attrname(p
->first
.c_str(), n
, CHAIN_XATTR_MAX_NAME_LEN
);
4510 if (p
->second
.length())
4511 val
= p
->second
.c_str();
4514 // ??? Why do we skip setting all the other attrs if one fails?
4515 int r
= chain_fsetxattr(fd
, n
, val
, p
->second
.length());
4517 derr
<< __FUNC__
<< ": chain_setxattr returned " << r
<< dendl
;
4524 // debug EIO injection
// inject_data_error: debug EIO injection — mark oid so subsequent data reads
// on it report an injected error (checked by debug_data_eio).
4525 void FileStore::inject_data_error(const ghobject_t
&oid
) {
4526 std::lock_guard l
{read_error_lock
};
4527 dout(10) << __FUNC__
<< ": init error on " << oid
<< dendl
;
4528 data_error_set
.insert(oid
);
// inject_mdata_error: debug EIO injection — mark oid so subsequent metadata
// reads on it report an injected error (checked by debug_mdata_eio).
4530 void FileStore::inject_mdata_error(const ghobject_t
&oid
) {
4531 std::lock_guard l
{read_error_lock
};
4532 dout(10) << __FUNC__
<< ": init error on " << oid
<< dendl
;
4533 mdata_error_set
.insert(oid
);
// debug_obj_on_delete: clear any injected data/metadata error state for oid
// when the object is deleted.
4536 void FileStore::debug_obj_on_delete(const ghobject_t
&oid
) {
4537 std::lock_guard l
{read_error_lock
};
4538 dout(10) << __FUNC__
<< ": clear error on " << oid
<< dendl
;
4539 data_error_set
.erase(oid
);
4540 mdata_error_set
.erase(oid
);
// debug_data_eio: true iff oid has an injected data-read error
// (see inject_data_error).
4542 bool FileStore::debug_data_eio(const ghobject_t
&oid
) {
4543 std::lock_guard l
{read_error_lock
};
4544 if (data_error_set
.count(oid
)) {
4545 dout(10) << __FUNC__
<< ": inject error on " << oid
<< dendl
;
// debug_mdata_eio: true iff oid has an injected metadata-read error
// (see inject_mdata_error).
4551 bool FileStore::debug_mdata_eio(const ghobject_t
&oid
) {
4552 std::lock_guard l
{read_error_lock
};
4553 if (mdata_error_set
.count(oid
)) {
4554 dout(10) << __FUNC__
<< ": inject error on " << oid
<< dendl
;
// getattr: read a single object xattr by (unmangled) name.  Resolves the
// temp collection when needed, waits for in-flight applies on the object,
// then reads the mangled attr from the file via _fgetattr.  On -ENODATA the
// attr may have spilled out to the omap (DBObjectMap), so it is looked up
// there via object_map->get_xattrs.  Honors EIO injection
// (filestore_debug_inject_read_err + debug_mdata_eio).  Returns 0 on
// success, negative errno on failure.
4564 int FileStore::getattr(CollectionHandle
& ch
, const ghobject_t
& oid
, const char *name
, bufferptr
&bp
)
4566 tracepoint(objectstore
, getattr_enter
, ch
->cid
.c_str());
4567 const coll_t
& cid
= !_need_temp_object_collection(ch
->cid
, oid
) ? ch
->cid
: ch
->cid
.get_temp();
4568 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " '" << name
<< "'" << dendl
;
4570 auto osr
= static_cast<OpSequencer
*>(ch
.get());
4571 osr
->wait_for_apply(oid
);
4574 int r
= lfn_open(cid
, oid
, false, &fd
);
4578 char n
[CHAIN_XATTR_MAX_NAME_LEN
];
4579 get_attrname(name
, n
, CHAIN_XATTR_MAX_NAME_LEN
);
4580 r
= _fgetattr(**fd
, n
, bp
);
4582 if (r
== -ENODATA
) {
4583 map
<string
, bufferlist
> got
;
4585 to_get
.insert(string(name
));
4587 r
= get_index(cid
, &index
);
4589 dout(10) << __FUNC__
<< ": could not get index r = " << r
<< dendl
;
4592 r
= object_map
->get_xattrs(oid
, to_get
, &got
);
4593 if (r
< 0 && r
!= -ENOENT
) {
4594 dout(10) << __FUNC__
<< ": get_xattrs err r =" << r
<< dendl
;
4598 dout(10) << __FUNC__
<< ": got.size() is 0" << dendl
;
4601 bp
= bufferptr(got
.begin()->second
.c_str(),
4602 got
.begin()->second
.length());
4606 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " '" << name
<< "' = " << r
<< dendl
;
4607 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4608 if (cct
->_conf
->filestore_debug_inject_read_err
&&
4609 debug_mdata_eio(oid
)) {
4612 tracepoint(objectstore
, getattr_exit
, r
);
4613 return r
< 0 ? r
: 0;
// getattrs: read all xattrs of an object into aset.  Resolves the temp
// collection, waits for in-flight applies, reads XATTR_SPILL_OUT_NAME to
// learn whether some attrs spilled out to the omap, collects the inline
// attrs via _fgetattrs, then (when spilled out) merges in the omap-stored
// xattrs via object_map->get_all_xattrs / get_xattrs, converting each
// bufferlist value to a bufferptr.  Honors EIO injection.
// NOTE(review): lossy extraction — the spill_out assignment and some error
// branches between fragments are missing from this view.
4617 int FileStore::getattrs(CollectionHandle
& ch
, const ghobject_t
& oid
, map
<string
,bufferptr
>& aset
)
4619 tracepoint(objectstore
, getattrs_enter
, ch
->cid
.c_str());
4620 const coll_t
& cid
= !_need_temp_object_collection(ch
->cid
, oid
) ? ch
->cid
: ch
->cid
.get_temp();
4621 set
<string
> omap_attrs
;
4622 map
<string
, bufferlist
> omap_aset
;
4624 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< dendl
;
4626 auto osr
= static_cast<OpSequencer
*>(ch
.get());
4627 osr
->wait_for_apply(oid
);
4630 bool spill_out
= true;
4633 int r
= lfn_open(cid
, oid
, false, &fd
);
4638 r
= chain_fgetxattr(**fd
, XATTR_SPILL_OUT_NAME
, buf
, sizeof(buf
));
4639 if (r
>= 0 && !strncmp(buf
, XATTR_NO_SPILL_OUT
, sizeof(XATTR_NO_SPILL_OUT
)))
4642 r
= _fgetattrs(**fd
, aset
);
4644 fd
= FDRef(); // defensive
4650 dout(10) << __FUNC__
<< ": no xattr exists in object_map r = " << r
<< dendl
;
4654 r
= get_index(cid
, &index
);
4656 dout(10) << __FUNC__
<< ": could not get index r = " << r
<< dendl
;
4660 r
= object_map
->get_all_xattrs(oid
, &omap_attrs
);
4661 if (r
< 0 && r
!= -ENOENT
) {
4662 dout(10) << __FUNC__
<< ": could not get omap_attrs r = " << r
<< dendl
;
4666 r
= object_map
->get_xattrs(oid
, omap_attrs
, &omap_aset
);
4667 if (r
< 0 && r
!= -ENOENT
) {
4668 dout(10) << __FUNC__
<< ": could not get omap_attrs r = " << r
<< dendl
;
4674 ceph_assert(omap_attrs
.size() == omap_aset
.size());
4675 for (map
<string
, bufferlist
>::iterator i
= omap_aset
.begin();
4676 i
!= omap_aset
.end();
4678 string
key(i
->first
);
4679 aset
.insert(make_pair(key
,
4680 bufferptr(i
->second
.c_str(), i
->second
.length())));
4683 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " = " << r
<< dendl
;
4684 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4686 if (cct
->_conf
->filestore_debug_inject_read_err
&&
4687 debug_mdata_eio(oid
)) {
4690 tracepoint(objectstore
, getattrs_exit
, r
);
// _setattrs: set a batch of object xattrs, splitting each attr between the
// inline (filesystem xattr) store and the omap spill-out store:
//  - values larger than m_filestore_max_inline_xattr_size go to the omap
//    (removing any stale inline copy);
//  - once inline_set reaches m_filestore_max_inline_xattrs, new names also
//    go to the omap;
//  - otherwise the attr is stored inline and any omap copy is removed;
//  - if the initial _fgetattrs overflowed (-E2BIG, incomplete_inline), all
//    incoming attrs are forced to the omap and their inline copies removed.
// The XATTR_SPILL_OUT_NAME marker is flipped to XATTR_SPILL_OUT whenever
// anything is pushed to the omap; omap removals/sets go through object_map
// with the SequencerPosition for replay.  -ENOENT from omap removal is
// tolerated (nothing spilled yet) and r reset to 0.
// NOTE(review): lossy extraction — spill_out initialization, loop framing
// and some branches between fragments are missing from this view.
4695 int FileStore::_setattrs(const coll_t
& cid
, const ghobject_t
& oid
, map
<string
,bufferptr
>& aset
,
4696 const SequencerPosition
&spos
)
4698 map
<string
, bufferlist
> omap_set
;
4699 set
<string
> omap_remove
;
4700 map
<string
, bufferptr
> inline_set
;
4701 map
<string
, bufferptr
> inline_to_set
;
4704 bool incomplete_inline
= false;
4706 int r
= lfn_open(cid
, oid
, false, &fd
);
4712 r
= chain_fgetxattr(**fd
, XATTR_SPILL_OUT_NAME
, buf
, sizeof(buf
));
4713 if (r
>= 0 && !strncmp(buf
, XATTR_NO_SPILL_OUT
, sizeof(XATTR_NO_SPILL_OUT
)))
4718 r
= _fgetattrs(**fd
, inline_set
);
4719 incomplete_inline
= (r
== -E2BIG
);
4720 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4721 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
4722 << (incomplete_inline
? " (incomplete_inline, forcing omap)" : "")
4725 for (map
<string
,bufferptr
>::iterator p
= aset
.begin();
4728 char n
[CHAIN_XATTR_MAX_NAME_LEN
];
4729 get_attrname(p
->first
.c_str(), n
, CHAIN_XATTR_MAX_NAME_LEN
);
4731 if (incomplete_inline
) {
4732 chain_fremovexattr(**fd
, n
); // ignore any error
4733 omap_set
[p
->first
].push_back(p
->second
);
4737 if (p
->second
.length() > m_filestore_max_inline_xattr_size
) {
4738 if (inline_set
.count(p
->first
)) {
4739 inline_set
.erase(p
->first
);
4740 r
= chain_fremovexattr(**fd
, n
);
4744 omap_set
[p
->first
].push_back(p
->second
);
4748 if (!inline_set
.count(p
->first
) &&
4749 inline_set
.size() >= m_filestore_max_inline_xattrs
) {
4750 omap_set
[p
->first
].push_back(p
->second
);
4753 omap_remove
.insert(p
->first
);
4754 inline_set
.insert(*p
);
4756 inline_to_set
.insert(*p
);
4759 if (spill_out
!= 1 && !omap_set
.empty()) {
4760 chain_fsetxattr(**fd
, XATTR_SPILL_OUT_NAME
, XATTR_SPILL_OUT
,
4761 sizeof(XATTR_SPILL_OUT
));
4764 r
= _fsetattrs(**fd
, inline_to_set
);
4768 if (spill_out
&& !omap_remove
.empty()) {
4769 r
= object_map
->remove_xattrs(oid
, omap_remove
, &spos
);
4770 if (r
< 0 && r
!= -ENOENT
) {
4771 dout(10) << __FUNC__
<< ": could not remove_xattrs r = " << r
<< dendl
;
4772 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4775 r
= 0; // don't confuse the debug output
4779 if (!omap_set
.empty()) {
4780 r
= object_map
->set_xattrs(oid
, omap_set
, &spos
);
4782 dout(10) << __FUNC__
<< ": could not set_xattrs r = " << r
<< dendl
;
4783 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4790 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " = " << r
<< dendl
;
// _rmattr: remove a single object xattr by name.  Tries the inline
// (filesystem) xattr first via chain_fremovexattr; if that reports -ENODATA
// and the object has spilled out to the omap, removes it from the omap via
// object_map->remove_xattrs with the SequencerPosition.
// NOTE(review): lossy extraction — the spill_out assignment and some error
// branches between fragments are missing from this view.
4795 int FileStore::_rmattr(const coll_t
& cid
, const ghobject_t
& oid
, const char *name
,
4796 const SequencerPosition
&spos
)
4798 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " '" << name
<< "'" << dendl
;
4800 bool spill_out
= true;
4802 int r
= lfn_open(cid
, oid
, false, &fd
);
4808 r
= chain_fgetxattr(**fd
, XATTR_SPILL_OUT_NAME
, buf
, sizeof(buf
));
4809 if (r
>= 0 && !strncmp(buf
, XATTR_NO_SPILL_OUT
, sizeof(XATTR_NO_SPILL_OUT
))) {
4813 char n
[CHAIN_XATTR_MAX_NAME_LEN
];
4814 get_attrname(name
, n
, CHAIN_XATTR_MAX_NAME_LEN
);
4815 r
= chain_fremovexattr(**fd
, n
);
4816 if (r
== -ENODATA
&& spill_out
) {
4818 r
= get_index(cid
, &index
);
4820 dout(10) << __FUNC__
<< ": could not get index r = " << r
<< dendl
;
4823 set
<string
> to_remove
;
4824 to_remove
.insert(string(name
));
4825 r
= object_map
->remove_xattrs(oid
, to_remove
, &spos
);
4826 if (r
< 0 && r
!= -ENOENT
) {
4827 dout(10) << __FUNC__
<< ": could not remove_xattrs index r = " << r
<< dendl
;
4828 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4835 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " '" << name
<< "' = " << r
<< dendl
;
// _rmattrs: remove ALL xattrs of an object.  Enumerates and removes every
// inline xattr (via _fgetattrs + chain_fremovexattr per mangled name); when
// the object has spilled out, also removes every omap-stored xattr via
// object_map->get_all_xattrs + remove_xattrs.  Finally resets the
// XATTR_SPILL_OUT_NAME marker to XATTR_NO_SPILL_OUT since nothing remains
// in the omap.
// NOTE(review): lossy extraction — spill_out handling and some error
// branches between fragments are missing from this view.
4839 int FileStore::_rmattrs(const coll_t
& cid
, const ghobject_t
& oid
,
4840 const SequencerPosition
&spos
)
4842 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< dendl
;
4844 map
<string
,bufferptr
> aset
;
4846 set
<string
> omap_attrs
;
4848 bool spill_out
= true;
4850 int r
= lfn_open(cid
, oid
, false, &fd
);
4856 r
= chain_fgetxattr(**fd
, XATTR_SPILL_OUT_NAME
, buf
, sizeof(buf
));
4857 if (r
>= 0 && !strncmp(buf
, XATTR_NO_SPILL_OUT
, sizeof(XATTR_NO_SPILL_OUT
))) {
4861 r
= _fgetattrs(**fd
, aset
);
4863 for (map
<string
,bufferptr
>::iterator p
= aset
.begin(); p
!= aset
.end(); ++p
) {
4864 char n
[CHAIN_XATTR_MAX_NAME_LEN
];
4865 get_attrname(p
->first
.c_str(), n
, CHAIN_XATTR_MAX_NAME_LEN
);
4866 r
= chain_fremovexattr(**fd
, n
);
4868 dout(10) << __FUNC__
<< ": could not remove xattr r = " << r
<< dendl
;
4875 dout(10) << __FUNC__
<< ": no xattr exists in object_map r = " << r
<< dendl
;
4879 r
= get_index(cid
, &index
);
4881 dout(10) << __FUNC__
<< ": could not get index r = " << r
<< dendl
;
4885 r
= object_map
->get_all_xattrs(oid
, &omap_attrs
);
4886 if (r
< 0 && r
!= -ENOENT
) {
4887 dout(10) << __FUNC__
<< ": could not get omap_attrs r = " << r
<< dendl
;
4888 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4891 r
= object_map
->remove_xattrs(oid
, omap_attrs
, &spos
);
4892 if (r
< 0 && r
!= -ENOENT
) {
4893 dout(10) << __FUNC__
<< ": could not remove omap_attrs r = " << r
<< dendl
;
4898 chain_fsetxattr(**fd
, XATTR_SPILL_OUT_NAME
, XATTR_NO_SPILL_OUT
,
4899 sizeof(XATTR_NO_SPILL_OUT
));
4905 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " = " << r
<< dendl
;
// _collection_remove_recursive: delete every object in a collection (in
// batches of 300 via collection_list paging on 'max'), asserting the replay
// guard for each, then destroy the collection itself.
4912 int FileStore::_collection_remove_recursive(const coll_t
&cid
,
4913 const SequencerPosition
&spos
)
4916 int r
= collection_stat(cid
, &st
);
4923 vector
<ghobject_t
> objects
;
4925 while (!max
.is_max()) {
4926 r
= collection_list(cid
, max
, ghobject_t::get_max(),
4927 300, &objects
, &max
);
4930 for (vector
<ghobject_t
>::iterator i
= objects
.begin();
4933 ceph_assert(_check_replay_guard(cid
, *i
, spos
));
4934 r
= _remove(cid
, *i
, spos
);
4940 return _destroy_collection(cid
);
4943 // --------------------------
// list_collections(ls): convenience overload — list non-temp collections
// only (include_temp = false).
4946 int FileStore::list_collections(vector
<coll_t
>& ls
)
4948 return list_collections(ls
, false);
// list_collections(ls, include_temp): enumerate collection directories under
// basedir/current via opendir/readdir.  For filesystems that don't report
// d_type (DT_UNKNOWN, e.g. non-ext[234]/btrfs) each entry is stat()ed to
// confirm it is a directory; non-directories, the "omap" dir, "." and ".."
// are skipped; remaining names are parsed as coll_t, and temp collections
// are included only when include_temp is set.
// NOTE(review): lossy extraction — the closedir and some push_back/continue
// lines between fragments are missing from this view.
4951 int FileStore::list_collections(vector
<coll_t
>& ls
, bool include_temp
)
4953 tracepoint(objectstore
, list_collections_enter
);
4954 dout(10) << __FUNC__
<< dendl
;
4957 snprintf(fn
, sizeof(fn
), "%s/current", basedir
.c_str());
4960 DIR *dir
= ::opendir(fn
);
4963 derr
<< "tried opening directory " << fn
<< ": " << cpp_strerror(-r
) << dendl
;
4964 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4968 struct dirent
*de
= nullptr;
4969 while ((de
= ::readdir(dir
))) {
4970 if (de
->d_type
== DT_UNKNOWN
) {
4971 // d_type not supported (non-ext[234], btrfs), must stat
4973 char filename
[PATH_MAX
];
4974 if (int n
= snprintf(filename
, sizeof(filename
), "%s/%s", fn
, de
->d_name
);
4975 n
>= static_cast<int>(sizeof(filename
))) {
4976 derr
<< __func__
<< " path length overrun: " << n
<< dendl
;
4980 r
= ::stat(filename
, &sb
);
4983 derr
<< "stat on " << filename
<< ": " << cpp_strerror(-r
) << dendl
;
4984 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
4987 if (!S_ISDIR(sb
.st_mode
)) {
4990 } else if (de
->d_type
!= DT_DIR
) {
4993 if (strcmp(de
->d_name
, "omap") == 0) {
4996 if (de
->d_name
[0] == '.' &&
4997 (de
->d_name
[1] == '\0' ||
4998 (de
->d_name
[1] == '.' &&
4999 de
->d_name
[2] == '\0')))
5002 if (!cid
.parse(de
->d_name
)) {
5003 derr
<< "ignoring invalid collection '" << de
->d_name
<< "'" << dendl
;
5006 if (!cid
.is_temp() || include_temp
)
5011 derr
<< "trying readdir " << fn
<< ": " << cpp_strerror(r
) << dendl
;
5016 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5017 tracepoint(objectstore
, list_collections_exit
, r
);
// collection_stat: stat(2) the collection's directory (path from get_cdir)
// into *st.  Returns stat's result (negative errno on failure).
5021 int FileStore::collection_stat(const coll_t
& c
, struct stat
*st
)
5023 tracepoint(objectstore
, collection_stat_enter
, c
.c_str());
5025 get_cdir(c
, fn
, sizeof(fn
));
5026 dout(15) << __FUNC__
<< ": " << fn
<< dendl
;
5027 int r
= ::stat(fn
, st
);
5030 dout(10) << __FUNC__
<< ": " << fn
<< " = " << r
<< dendl
;
5031 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5032 tracepoint(objectstore
, collection_stat_exit
, r
);
// collection_exists: true iff the collection directory stats successfully.
5036 bool FileStore::collection_exists(const coll_t
& c
)
5038 tracepoint(objectstore
, collection_exists_enter
, c
.c_str());
5040 bool ret
= collection_stat(c
, &st
) == 0;
5041 tracepoint(objectstore
, collection_exists_exit
, ret
);
// collection_empty: set *empty by asking the collection's index for a
// partial listing (under the index's shared access_lock); *empty is true
// when the listing returns no objects.
5045 int FileStore::collection_empty(const coll_t
& cid
, bool *empty
)
5047 tracepoint(objectstore
, collection_empty_enter
, cid
.c_str());
5048 dout(15) << __FUNC__
<< ": " << cid
<< dendl
;
5050 int r
= get_index(cid
, &index
);
5052 derr
<< __FUNC__
<< ": get_index returned: " << cpp_strerror(r
)
5057 ceph_assert(index
.index
);
5058 std::shared_lock l
{(index
.index
)->access_lock
};
5060 vector
<ghobject_t
> ls
;
5061 r
= index
->collection_list_partial(ghobject_t(), ghobject_t::get_max(),
5064 derr
<< __FUNC__
<< ": collection_list_partial returned: "
5065 << cpp_strerror(r
) << dendl
;
5066 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5069 *empty
= ls
.empty();
5070 tracepoint(objectstore
, collection_empty_exit
, *empty
);
// _collection_set_bits: persist the collection's PG split 'bits' value as a
// "bits" xattr on the collection directory (opened O_RDONLY|O_CLOEXEC, fd
// closed after the set).
5074 int FileStore::_collection_set_bits(const coll_t
& c
, int bits
)
5077 get_cdir(c
, fn
, sizeof(fn
));
5078 dout(10) << __FUNC__
<< ": " << fn
<< " " << bits
<< dendl
;
5082 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
);
5087 get_attrname("bits", n
, PATH_MAX
);
5088 r
= chain_fsetxattr(fd
, n
, (char*)&v
, sizeof(v
));
5089 VOID_TEMP_FAILURE_RETRY(::close(fd
));
5091 dout(10) << __FUNC__
<< ": " << fn
<< " " << bits
<< " = " << r
<< dendl
;
// collection_bits: read back the "bits" xattr written by
// _collection_set_bits from the collection directory; the fd is closed after
// the read.
5095 int FileStore::collection_bits(CollectionHandle
& ch
)
5098 get_cdir(ch
->cid
, fn
, sizeof(fn
));
5099 dout(15) << __FUNC__
<< ": " << fn
<< dendl
;
5103 int fd
= ::open(fn
, O_RDONLY
|O_CLOEXEC
);
5108 get_attrname("bits", n
, PATH_MAX
);
5109 r
= chain_fgetxattr(fd
, n
, (char*)&bits
, sizeof(bits
));
5110 VOID_TEMP_FAILURE_RETRY(::close(fd
));
5116 dout(10) << __FUNC__
<< ": " << fn
<< " = " << bits
<< dendl
;
// collection_list: list up to 'max' objects in [orig_start, end) of a
// collection, writing results to *ls and the resume cursor to *next.
// Derives the pool id and shard from the collection (temp pools map to
// -2 - pool, meta to NO_SHARD) because HashIndex cannot reconstruct them for
// the 'next' cursor.  For a normal (non-temp, non-meta) collection, objects
// in the companion temp collection sort first, so the temp collection is
// listed before falling through to the main one.  The actual listing is
// index->collection_list_partial under the index's shared access_lock; the
// pool/shard are then patched into *next.
// NOTE(review): lossy extraction — pool/pgid declarations and several branch
// bodies between fragments are missing from this view.
5120 int FileStore::collection_list(const coll_t
& c
,
5121 const ghobject_t
& orig_start
,
5122 const ghobject_t
& end
,
5124 vector
<ghobject_t
> *ls
, ghobject_t
*next
)
5126 ghobject_t start
= orig_start
;
5130 ghobject_t temp_next
;
5133 // figure out the pool id. we need this in order to generate a
5134 // meaningful 'next' value.
5139 if (c
.is_temp(&pgid
)) {
5140 pool
= -2 - pgid
.pool();
5142 } else if (c
.is_pg(&pgid
)) {
5145 } else if (c
.is_meta()) {
5147 shard
= shard_id_t::NO_SHARD
;
5149 // hrm, the caller is test code! we should get kill it off. for now,
5152 shard
= shard_id_t::NO_SHARD
;
5154 dout(20) << __FUNC__
<< ": pool is " << pool
<< " shard is " << shard
5155 << " pgid " << pgid
<< dendl
;
5159 sep
.set_shard(shard
);
5160 if (!c
.is_temp() && !c
.is_meta()) {
5162 dout(10) << __FUNC__
<< ": first checking temp pool" << dendl
;
5163 coll_t temp
= c
.get_temp();
5164 int r
= collection_list(temp
, start
, end
, max
, ls
, next
);
5167 if (*next
!= ghobject_t::get_max())
5170 dout(10) << __FUNC__
<< ": fall through to non-temp collection, start "
5173 dout(10) << __FUNC__
<< ": start " << start
<< " >= sep " << sep
<< dendl
;
5178 int r
= get_index(c
, &index
);
5182 ceph_assert(index
.index
);
5183 std::shared_lock l
{(index
.index
)->access_lock
};
5185 r
= index
->collection_list_partial(start
, end
, max
, ls
, next
);
5188 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5191 dout(20) << "objects: " << *ls
<< dendl
;
5193 // HashIndex doesn't know the pool when constructing a 'next' value
5194 if (!next
->is_max()) {
5195 next
->hobj
.pool
= pool
;
5196 next
->set_shard(shard
);
5197 dout(20) << " next " << *next
<< dendl
;
5203 int FileStore::omap_get(CollectionHandle
& ch
, const ghobject_t
&hoid
,
5205 map
<string
, bufferlist
> *out
)
5207 tracepoint(objectstore
, omap_get_enter
, ch
->cid
.c_str());
5208 const coll_t
& c
= !_need_temp_object_collection(ch
->cid
, hoid
) ? ch
->cid
: ch
->cid
.get_temp();
5209 dout(15) << __FUNC__
<< ": " << c
<< "/" << hoid
<< dendl
;
5211 auto osr
= static_cast<OpSequencer
*>(ch
.get());
5212 osr
->wait_for_apply(hoid
);
5215 int r
= get_index(c
, &index
);
5219 ceph_assert(index
.index
);
5220 std::shared_lock l
{(index
.index
)->access_lock
};
5221 r
= lfn_find(hoid
, index
);
5225 r
= object_map
->get(hoid
, header
, out
);
5226 if (r
< 0 && r
!= -ENOENT
) {
5227 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5230 tracepoint(objectstore
, omap_get_exit
, 0);
5234 int FileStore::omap_get_header(
5235 CollectionHandle
& ch
,
5236 const ghobject_t
&hoid
,
5240 tracepoint(objectstore
, omap_get_header_enter
, ch
->cid
.c_str());
5241 const coll_t
& c
= !_need_temp_object_collection(ch
->cid
, hoid
) ? ch
->cid
: ch
->cid
.get_temp();
5242 dout(15) << __FUNC__
<< ": " << c
<< "/" << hoid
<< dendl
;
5244 auto osr
= static_cast<OpSequencer
*>(ch
.get());
5245 osr
->wait_for_apply(hoid
);
5248 int r
= get_index(c
, &index
);
5252 ceph_assert(index
.index
);
5253 std::shared_lock l
{(index
.index
)->access_lock
};
5254 r
= lfn_find(hoid
, index
);
5258 r
= object_map
->get_header(hoid
, bl
);
5259 if (r
< 0 && r
!= -ENOENT
) {
5260 ceph_assert(allow_eio
|| !m_filestore_fail_eio
|| r
!= -EIO
);
5263 tracepoint(objectstore
, omap_get_header_exit
, 0);
5267 int FileStore::omap_get_keys(CollectionHandle
& ch
, const ghobject_t
&hoid
, set
<string
> *keys
)
5269 tracepoint(objectstore
, omap_get_keys_enter
, ch
->cid
.c_str());
5270 const coll_t
& c
= !_need_temp_object_collection(ch
->cid
, hoid
) ? ch
->cid
: ch
->cid
.get_temp();
5271 dout(15) << __FUNC__
<< ": " << c
<< "/" << hoid
<< dendl
;
5273 auto osr
= static_cast<OpSequencer
*>(ch
.get());
5274 osr
->wait_for_apply(hoid
);
5277 int r
= get_index(c
, &index
);
5281 ceph_assert(index
.index
);
5282 std::shared_lock l
{(index
.index
)->access_lock
};
5283 r
= lfn_find(hoid
, index
);
5287 r
= object_map
->get_keys(hoid
, keys
);
5288 if (r
< 0 && r
!= -ENOENT
) {
5289 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5292 tracepoint(objectstore
, omap_get_keys_exit
, 0);
5296 int FileStore::omap_get_values(CollectionHandle
& ch
, const ghobject_t
&hoid
,
5297 const set
<string
> &keys
,
5298 map
<string
, bufferlist
> *out
)
5300 tracepoint(objectstore
, omap_get_values_enter
, ch
->cid
.c_str());
5301 const coll_t
& c
= !_need_temp_object_collection(ch
->cid
, hoid
) ? ch
->cid
: ch
->cid
.get_temp();
5302 dout(15) << __FUNC__
<< ": " << c
<< "/" << hoid
<< dendl
;
5304 auto osr
= static_cast<OpSequencer
*>(ch
.get());
5305 osr
->wait_for_apply(hoid
);
5308 const char *where
= "()";
5309 int r
= get_index(c
, &index
);
5311 where
= " (get_index)";
5315 ceph_assert(index
.index
);
5316 std::shared_lock l
{(index
.index
)->access_lock
};
5317 r
= lfn_find(hoid
, index
);
5319 where
= " (lfn_find)";
5323 r
= object_map
->get_values(hoid
, keys
, out
);
5324 if (r
< 0 && r
!= -ENOENT
) {
5325 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5326 where
= " (get_values)";
5331 tracepoint(objectstore
, omap_get_values_exit
, r
);
5332 dout(15) << __FUNC__
<< ": " << c
<< "/" << hoid
<< " = " << r
5337 int FileStore::omap_check_keys(CollectionHandle
& ch
, const ghobject_t
&hoid
,
5338 const set
<string
> &keys
,
5341 tracepoint(objectstore
, omap_check_keys_enter
, ch
->cid
.c_str());
5342 const coll_t
& c
= !_need_temp_object_collection(ch
->cid
, hoid
) ? ch
->cid
: ch
->cid
.get_temp();
5343 dout(15) << __FUNC__
<< ": " << c
<< "/" << hoid
<< dendl
;
5345 auto osr
= static_cast<OpSequencer
*>(ch
.get());
5346 osr
->wait_for_apply(hoid
);
5349 int r
= get_index(c
, &index
);
5353 ceph_assert(index
.index
);
5354 std::shared_lock l
{(index
.index
)->access_lock
};
5355 r
= lfn_find(hoid
, index
);
5359 r
= object_map
->check_keys(hoid
, keys
, out
);
5360 if (r
< 0 && r
!= -ENOENT
) {
5361 if (r
== -EIO
&& m_filestore_fail_eio
) handle_eio();
5364 tracepoint(objectstore
, omap_check_keys_exit
, 0);
5368 ObjectMap::ObjectMapIterator
FileStore::get_omap_iterator(
5369 CollectionHandle
& ch
,
5370 const ghobject_t
&oid
)
5372 auto osr
= static_cast<OpSequencer
*>(ch
.get());
5373 osr
->wait_for_apply(oid
);
5374 return get_omap_iterator(ch
->cid
, oid
);
5377 ObjectMap::ObjectMapIterator
FileStore::get_omap_iterator(const coll_t
& _c
,
5378 const ghobject_t
&hoid
)
5380 tracepoint(objectstore
, get_omap_iterator
, _c
.c_str());
5381 const coll_t
& c
= !_need_temp_object_collection(_c
, hoid
) ? _c
: _c
.get_temp();
5382 dout(15) << __FUNC__
<< ": " << c
<< "/" << hoid
<< dendl
;
5384 int r
= get_index(c
, &index
);
5386 dout(10) << __FUNC__
<< ": " << c
<< "/" << hoid
<< " = 0 "
5387 << "(get_index failed with " << cpp_strerror(r
) << ")" << dendl
;
5388 return ObjectMap::ObjectMapIterator();
5391 ceph_assert(index
.index
);
5392 std::shared_lock l
{(index
.index
)->access_lock
};
5393 r
= lfn_find(hoid
, index
);
5395 dout(10) << __FUNC__
<< ": " << c
<< "/" << hoid
<< " = 0 "
5396 << "(lfn_find failed with " << cpp_strerror(r
) << ")" << dendl
;
5397 return ObjectMap::ObjectMapIterator();
5400 return object_map
->get_iterator(hoid
);
5403 int FileStore::_collection_hint_expected_num_objs(const coll_t
& c
, uint32_t pg_num
,
5404 uint64_t expected_num_objs
,
5405 const SequencerPosition
&spos
)
5407 dout(15) << __FUNC__
<< ": collection: " << c
<< " pg number: "
5408 << pg_num
<< " expected number of objects: " << expected_num_objs
<< dendl
;
5411 int ret
= collection_empty(c
, &empty
);
5414 if (!empty
&& !replaying
) {
5415 dout(0) << "Failed to give an expected number of objects hint to collection : "
5416 << c
<< ", only empty collection can take such type of hint. " << dendl
;
5421 ret
= get_index(c
, &index
);
5424 // Pre-hash the collection
5425 ret
= index
->pre_hash_collection(pg_num
, expected_num_objs
);
5426 dout(10) << "pre_hash_collection " << c
<< " = " << ret
<< dendl
;
5429 _set_replay_guard(c
, spos
);
5434 int FileStore::_create_collection(
5437 const SequencerPosition
&spos
)
5440 get_cdir(c
, fn
, sizeof(fn
));
5441 dout(15) << __FUNC__
<< ": " << fn
<< dendl
;
5442 int r
= ::mkdir(fn
, 0755);
5445 if (r
== -EEXIST
&& replaying
)
5447 dout(10) << __FUNC__
<< ": " << fn
<< " = " << r
<< dendl
;
5454 r
= _collection_set_bits(c
, bits
);
5457 // create parallel temp collection, too
5458 if (!c
.is_meta() && !c
.is_temp()) {
5459 coll_t temp
= c
.get_temp();
5460 r
= _create_collection(temp
, 0, spos
);
5465 _set_replay_guard(c
, spos
);
5469 int FileStore::_destroy_collection(const coll_t
& c
)
5473 get_cdir(c
, fn
, sizeof(fn
));
5474 dout(15) << __FUNC__
<< ": " << fn
<< dendl
;
5477 r
= get_index(c
, &from
);
5480 ceph_assert(from
.index
);
5481 std::unique_lock l
{(from
.index
)->access_lock
};
5483 r
= from
->prep_delete();
5494 // destroy parallel temp collection, too
5495 if (!c
.is_meta() && !c
.is_temp()) {
5496 coll_t temp
= c
.get_temp();
5497 int r2
= _destroy_collection(temp
);
5505 dout(10) << __FUNC__
<< ": " << fn
<< " = " << r
<< dendl
;
5510 int FileStore::_collection_add(const coll_t
& c
, const coll_t
& oldcid
, const ghobject_t
& o
,
5511 const SequencerPosition
& spos
)
5513 dout(15) << __FUNC__
<< ": " << c
<< "/" << o
<< " from " << oldcid
<< "/" << o
<< dendl
;
5515 int dstcmp
= _check_replay_guard(c
, o
, spos
);
5519 // check the src name too; it might have a newer guard, and we don't
5520 // want to clobber it
5521 int srccmp
= _check_replay_guard(oldcid
, o
, spos
);
5525 // open guard on object so we don't any previous operations on the
5526 // new name that will modify the source inode.
5528 int r
= lfn_open(oldcid
, o
, 0, &fd
);
5530 // the source collection/object does not exist. If we are replaying, we
5531 // should be safe, so just return 0 and move on.
5532 ceph_assert(replaying
);
5533 dout(10) << __FUNC__
<< ": " << c
<< "/" << o
<< " from "
5534 << oldcid
<< "/" << o
<< " (dne, continue replay) " << dendl
;
5537 if (dstcmp
> 0) { // if dstcmp == 0 the guard already says "in-progress"
5538 _set_replay_guard(**fd
, spos
, &o
, true);
5541 r
= lfn_link(oldcid
, c
, o
, o
);
5542 if (replaying
&& !backend
->can_checkpoint() &&
5543 r
== -EEXIST
) // crashed between link() and set_replay_guard()
5548 // close guard on object so we don't do this again
5550 _close_replay_guard(**fd
, spos
);
5554 dout(10) << __FUNC__
<< ": " << c
<< "/" << o
<< " from " << oldcid
<< "/" << o
<< " = " << r
<< dendl
;
5558 int FileStore::_collection_move_rename(const coll_t
& oldcid
, const ghobject_t
& oldoid
,
5559 coll_t c
, const ghobject_t
& o
,
5560 const SequencerPosition
& spos
,
5563 dout(15) << __FUNC__
<< ": " << c
<< "/" << o
<< " from " << oldcid
<< "/" << oldoid
<< dendl
;
5568 /* If the destination collection doesn't exist during replay,
5569 * we need to delete the src object and continue on
5571 if (!collection_exists(c
))
5575 dstcmp
= _check_replay_guard(c
, o
, spos
);
5579 // check the src name too; it might have a newer guard, and we don't
5580 // want to clobber it
5581 srccmp
= _check_replay_guard(oldcid
, oldoid
, spos
);
5586 // open guard on object so we don't any previous operations on the
5587 // new name that will modify the source inode.
5589 r
= lfn_open(oldcid
, oldoid
, 0, &fd
);
5591 // the source collection/object does not exist. If we are replaying, we
5592 // should be safe, so just return 0 and move on.
5594 dout(10) << __FUNC__
<< ": " << c
<< "/" << o
<< " from "
5595 << oldcid
<< "/" << oldoid
<< " (dne, continue replay) " << dendl
;
5596 } else if (allow_enoent
) {
5597 dout(10) << __FUNC__
<< ": " << c
<< "/" << o
<< " from "
5598 << oldcid
<< "/" << oldoid
<< " (dne, ignoring enoent)"
5601 ceph_abort_msg("ERROR: source must exist");
5607 if (allow_enoent
&& dstcmp
> 0) { // if dstcmp == 0, try_rename was started.
5611 r
= 0; // don't know if object_map was cloned
5613 if (dstcmp
> 0) { // if dstcmp == 0 the guard already says "in-progress"
5614 _set_replay_guard(**fd
, spos
, &o
, true);
5617 r
= lfn_link(oldcid
, c
, oldoid
, o
);
5618 if (replaying
&& !backend
->can_checkpoint() &&
5619 r
== -EEXIST
) // crashed between link() and set_replay_guard()
5629 // the name changed; link the omap content
5630 r
= object_map
->rename(oldoid
, o
, &spos
);
5638 r
= lfn_unlink(oldcid
, oldoid
, spos
, true);
5641 r
= lfn_open(c
, o
, 0, &fd
);
5643 // close guard on object so we don't do this again
5645 _close_replay_guard(**fd
, spos
, &o
);
5650 dout(10) << __FUNC__
<< ": " << c
<< "/" << o
<< " from " << oldcid
<< "/" << oldoid
5651 << " = " << r
<< dendl
;
5656 if (_check_replay_guard(oldcid
, oldoid
, spos
) > 0) {
5657 r
= lfn_unlink(oldcid
, oldoid
, spos
, true);
5660 dout(10) << __FUNC__
<< ": " << c
<< "/" << o
<< " from " << oldcid
<< "/" << oldoid
5661 << " = " << r
<< dendl
;
5665 void FileStore::_inject_failure()
5667 if (m_filestore_kill_at
) {
5668 int final
= --m_filestore_kill_at
;
5669 dout(5) << __FUNC__
<< ": " << (final
+1) << " -> " << final
<< dendl
;
5671 derr
<< __FUNC__
<< ": KILLING" << dendl
;
5678 int FileStore::_omap_clear(const coll_t
& cid
, const ghobject_t
&hoid
,
5679 const SequencerPosition
&spos
) {
5680 dout(15) << __FUNC__
<< ": " << cid
<< "/" << hoid
<< dendl
;
5682 int r
= get_index(cid
, &index
);
5686 ceph_assert(index
.index
);
5687 std::shared_lock l
{(index
.index
)->access_lock
};
5688 r
= lfn_find(hoid
, index
);
5692 r
= object_map
->clear_keys_header(hoid
, &spos
);
5693 if (r
< 0 && r
!= -ENOENT
)
5698 int FileStore::_omap_setkeys(const coll_t
& cid
, const ghobject_t
&hoid
,
5699 const map
<string
, bufferlist
> &aset
,
5700 const SequencerPosition
&spos
) {
5701 dout(15) << __FUNC__
<< ": " << cid
<< "/" << hoid
<< dendl
;
5704 //treat pgmeta as a logical object, skip to check exist
5705 if (hoid
.is_pgmeta())
5708 r
= get_index(cid
, &index
);
5710 dout(20) << __FUNC__
<< ": get_index got " << cpp_strerror(r
) << dendl
;
5714 ceph_assert(index
.index
);
5715 std::shared_lock l
{(index
.index
)->access_lock
};
5716 r
= lfn_find(hoid
, index
);
5718 dout(20) << __FUNC__
<< ": lfn_find got " << cpp_strerror(r
) << dendl
;
5723 if (g_conf()->subsys
.should_gather
<ceph_subsys_filestore
, 20>()) {
5724 for (auto& p
: aset
) {
5725 dout(20) << __FUNC__
<< ": set " << p
.first
<< dendl
;
5728 r
= object_map
->set_keys(hoid
, aset
, &spos
);
5729 dout(20) << __FUNC__
<< ": " << cid
<< "/" << hoid
<< " = " << r
<< dendl
;
5733 int FileStore::_omap_rmkeys(const coll_t
& cid
, const ghobject_t
&hoid
,
5734 const set
<string
> &keys
,
5735 const SequencerPosition
&spos
) {
5736 dout(15) << __FUNC__
<< ": " << cid
<< "/" << hoid
<< dendl
;
5739 //treat pgmeta as a logical object, skip to check exist
5740 if (hoid
.is_pgmeta())
5743 r
= get_index(cid
, &index
);
5747 ceph_assert(index
.index
);
5748 std::shared_lock l
{(index
.index
)->access_lock
};
5749 r
= lfn_find(hoid
, index
);
5754 r
= object_map
->rm_keys(hoid
, keys
, &spos
);
5755 if (r
< 0 && r
!= -ENOENT
)
5760 int FileStore::_omap_rmkeyrange(const coll_t
& cid
, const ghobject_t
&hoid
,
5761 const string
& first
, const string
& last
,
5762 const SequencerPosition
&spos
) {
5763 dout(15) << __FUNC__
<< ": " << cid
<< "/" << hoid
<< " [" << first
<< "," << last
<< "]" << dendl
;
5766 ObjectMap::ObjectMapIterator iter
= get_omap_iterator(cid
, hoid
);
5769 for (iter
->lower_bound(first
); iter
->valid() && iter
->key() < last
;
5771 keys
.insert(iter
->key());
5774 return _omap_rmkeys(cid
, hoid
, keys
, spos
);
5777 int FileStore::_omap_setheader(const coll_t
& cid
, const ghobject_t
&hoid
,
5778 const bufferlist
&bl
,
5779 const SequencerPosition
&spos
)
5781 dout(15) << __FUNC__
<< ": " << cid
<< "/" << hoid
<< dendl
;
5783 int r
= get_index(cid
, &index
);
5787 ceph_assert(index
.index
);
5788 std::shared_lock l
{(index
.index
)->access_lock
};
5789 r
= lfn_find(hoid
, index
);
5793 return object_map
->set_header(hoid
, bl
, &spos
);
5796 int FileStore::_merge_collection(const coll_t
& cid
,
5799 const SequencerPosition
&spos
)
5801 dout(15) << __FUNC__
<< ": " << cid
<< " " << dest
5802 << " bits " << bits
<< dendl
;
5805 if (!collection_exists(cid
)) {
5806 dout(2) << __FUNC__
<< ": " << cid
<< " DNE" << dendl
;
5807 ceph_assert(replaying
);
5810 if (!collection_exists(dest
)) {
5811 dout(2) << __FUNC__
<< ": " << dest
<< " DNE" << dendl
;
5812 ceph_assert(replaying
);
5817 if (_check_replay_guard(cid
, spos
) > 0)
5818 _collection_set_bits(dest
, bits
);
5821 bool is_pg
= dest
.is_pg(&pgid
);
5824 int dstcmp
= _check_replay_guard(dest
, spos
);
5828 int srccmp
= _check_replay_guard(cid
, spos
);
5832 _set_global_replay_guard(cid
, spos
);
5833 _set_replay_guard(cid
, spos
, true);
5834 _set_replay_guard(dest
, spos
, true);
5839 r
= get_index(cid
, &from
);
5843 r
= get_index(dest
, &to
);
5846 ceph_assert(from
.index
);
5847 std::unique_lock l1
{(from
.index
)->access_lock
};
5849 ceph_assert(to
.index
);
5850 std::unique_lock l2
{(to
.index
)->access_lock
};
5852 r
= from
->merge(bits
, to
.index
);
5859 r
= get_index(cid
.get_temp(), &from
);
5863 r
= get_index(dest
.get_temp(), &to
);
5866 ceph_assert(from
.index
);
5867 std::unique_lock l1
{(from
.index
)->access_lock
};
5869 ceph_assert(to
.index
);
5870 std::unique_lock l2
{(to
.index
)->access_lock
};
5872 r
= from
->merge(bits
, to
.index
);
5877 _destroy_collection(cid
);
5879 _close_replay_guard(dest
, spos
);
5880 _close_replay_guard(dest
.get_temp(), spos
);
5881 // no need to close guards on cid... it's removed.
5883 if (!r
&& cct
->_conf
->filestore_debug_verify_split
) {
5884 vector
<ghobject_t
> objects
;
5889 next
, ghobject_t::get_max(),
5890 get_ideal_list_max(),
5893 if (objects
.empty())
5895 for (vector
<ghobject_t
>::iterator i
= objects
.begin();
5898 if (!i
->match(bits
, pgid
.pgid
.ps())) {
5899 dout(20) << __FUNC__
<< ": " << *i
<< " does not belong in "
5901 ceph_assert(i
->match(bits
, pgid
.pgid
.ps()));
5908 dout(15) << __FUNC__
<< ": " << cid
<< " " << dest
<< " bits " << bits
5909 << " = " << r
<< dendl
;
5913 int FileStore::_split_collection(const coll_t
& cid
,
5917 const SequencerPosition
&spos
)
5921 dout(15) << __FUNC__
<< ": " << cid
<< " bits: " << bits
<< dendl
;
5922 if (!collection_exists(cid
)) {
5923 dout(2) << __FUNC__
<< ": " << cid
<< " DNE" << dendl
;
5924 ceph_assert(replaying
);
5927 if (!collection_exists(dest
)) {
5928 dout(2) << __FUNC__
<< ": " << dest
<< " DNE" << dendl
;
5929 ceph_assert(replaying
);
5933 int dstcmp
= _check_replay_guard(dest
, spos
);
5937 int srccmp
= _check_replay_guard(cid
, spos
);
5941 _set_global_replay_guard(cid
, spos
);
5942 _set_replay_guard(cid
, spos
, true);
5943 _set_replay_guard(dest
, spos
, true);
5946 r
= get_index(cid
, &from
);
5950 r
= get_index(dest
, &to
);
5953 ceph_assert(from
.index
);
5954 std::unique_lock l1
{(from
.index
)->access_lock
};
5956 ceph_assert(to
.index
);
5957 std::unique_lock l2
{(to
.index
)->access_lock
};
5959 r
= from
->split(rem
, bits
, to
.index
);
5962 _close_replay_guard(cid
, spos
);
5963 _close_replay_guard(dest
, spos
);
5965 _collection_set_bits(cid
, bits
);
5966 if (!r
&& cct
->_conf
->filestore_debug_verify_split
) {
5967 vector
<ghobject_t
> objects
;
5972 next
, ghobject_t::get_max(),
5973 get_ideal_list_max(),
5976 if (objects
.empty())
5978 for (vector
<ghobject_t
>::iterator i
= objects
.begin();
5981 dout(20) << __FUNC__
<< ": " << *i
<< " still in source "
5983 ceph_assert(!i
->match(bits
, rem
));
5987 next
= ghobject_t();
5991 next
, ghobject_t::get_max(),
5992 get_ideal_list_max(),
5995 if (objects
.empty())
5997 for (vector
<ghobject_t
>::iterator i
= objects
.begin();
6000 dout(20) << __FUNC__
<< ": " << *i
<< " now in dest "
6002 ceph_assert(i
->match(bits
, rem
));
6010 int FileStore::_set_alloc_hint(const coll_t
& cid
, const ghobject_t
& oid
,
6011 uint64_t expected_object_size
,
6012 uint64_t expected_write_size
)
6014 dout(15) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " object_size " << expected_object_size
<< " write_size " << expected_write_size
<< dendl
;
6019 if (expected_object_size
== 0 || expected_write_size
== 0)
6022 ret
= lfn_open(cid
, oid
, false, &fd
);
6027 // TODO: a more elaborate hint calculation
6028 uint64_t hint
= std::min
<uint64_t>(expected_write_size
, m_filestore_max_alloc_hint_size
);
6030 ret
= backend
->set_alloc_hint(**fd
, hint
);
6031 dout(20) << __FUNC__
<< ": hint " << hint
<< " ret " << ret
<< dendl
;
6036 dout(10) << __FUNC__
<< ": " << cid
<< "/" << oid
<< " object_size " << expected_object_size
<< " write_size " << expected_write_size
<< " = " << ret
<< dendl
;
6037 ceph_assert(!m_filestore_fail_eio
|| ret
!= -EIO
);
6041 const char** FileStore::get_tracked_conf_keys() const
6043 static const char* KEYS
[] = {
6044 "filestore_max_inline_xattr_size",
6045 "filestore_max_inline_xattr_size_xfs",
6046 "filestore_max_inline_xattr_size_btrfs",
6047 "filestore_max_inline_xattr_size_other",
6048 "filestore_max_inline_xattrs",
6049 "filestore_max_inline_xattrs_xfs",
6050 "filestore_max_inline_xattrs_btrfs",
6051 "filestore_max_inline_xattrs_other",
6052 "filestore_max_xattr_value_size",
6053 "filestore_max_xattr_value_size_xfs",
6054 "filestore_max_xattr_value_size_btrfs",
6055 "filestore_max_xattr_value_size_other",
6056 "filestore_min_sync_interval",
6057 "filestore_max_sync_interval",
6058 "filestore_queue_max_ops",
6059 "filestore_queue_max_bytes",
6060 "filestore_expected_throughput_bytes",
6061 "filestore_expected_throughput_ops",
6062 "filestore_queue_low_threshhold",
6063 "filestore_queue_high_threshhold",
6064 "filestore_queue_high_delay_multiple",
6065 "filestore_queue_max_delay_multiple",
6066 "filestore_commit_timeout",
6067 "filestore_dump_file",
6068 "filestore_kill_at",
6069 "filestore_fail_eio",
6070 "filestore_fadvise",
6071 "filestore_sloppy_crc",
6072 "filestore_sloppy_crc_block_size",
6073 "filestore_max_alloc_hint_size",
6079 void FileStore::handle_conf_change(const ConfigProxy
& conf
,
6080 const std::set
<std::string
> &changed
)
6082 if (changed
.count("filestore_max_inline_xattr_size") ||
6083 changed
.count("filestore_max_inline_xattr_size_xfs") ||
6084 changed
.count("filestore_max_inline_xattr_size_btrfs") ||
6085 changed
.count("filestore_max_inline_xattr_size_other") ||
6086 changed
.count("filestore_max_inline_xattrs") ||
6087 changed
.count("filestore_max_inline_xattrs_xfs") ||
6088 changed
.count("filestore_max_inline_xattrs_btrfs") ||
6089 changed
.count("filestore_max_inline_xattrs_other") ||
6090 changed
.count("filestore_max_xattr_value_size") ||
6091 changed
.count("filestore_max_xattr_value_size_xfs") ||
6092 changed
.count("filestore_max_xattr_value_size_btrfs") ||
6093 changed
.count("filestore_max_xattr_value_size_other")) {
6095 std::lock_guard
l(lock
);
6096 set_xattr_limits_via_conf();
6100 if (changed
.count("filestore_queue_max_bytes") ||
6101 changed
.count("filestore_queue_max_ops") ||
6102 changed
.count("filestore_expected_throughput_bytes") ||
6103 changed
.count("filestore_expected_throughput_ops") ||
6104 changed
.count("filestore_queue_low_threshhold") ||
6105 changed
.count("filestore_queue_high_threshhold") ||
6106 changed
.count("filestore_queue_high_delay_multiple") ||
6107 changed
.count("filestore_queue_max_delay_multiple")) {
6108 std::lock_guard
l(lock
);
6109 set_throttle_params();
6112 if (changed
.count("filestore_min_sync_interval") ||
6113 changed
.count("filestore_max_sync_interval") ||
6114 changed
.count("filestore_kill_at") ||
6115 changed
.count("filestore_fail_eio") ||
6116 changed
.count("filestore_sloppy_crc") ||
6117 changed
.count("filestore_sloppy_crc_block_size") ||
6118 changed
.count("filestore_max_alloc_hint_size") ||
6119 changed
.count("filestore_fadvise")) {
6120 std::lock_guard
l(lock
);
6121 m_filestore_min_sync_interval
= conf
->filestore_min_sync_interval
;
6122 m_filestore_max_sync_interval
= conf
->filestore_max_sync_interval
;
6123 m_filestore_kill_at
= conf
->filestore_kill_at
;
6124 m_filestore_fail_eio
= conf
->filestore_fail_eio
;
6125 m_filestore_fadvise
= conf
->filestore_fadvise
;
6126 m_filestore_sloppy_crc
= conf
->filestore_sloppy_crc
;
6127 m_filestore_sloppy_crc_block_size
= conf
->filestore_sloppy_crc_block_size
;
6128 m_filestore_max_alloc_hint_size
= conf
->filestore_max_alloc_hint_size
;
6130 if (changed
.count("filestore_commit_timeout")) {
6131 std::lock_guard
l(sync_entry_timeo_lock
);
6132 m_filestore_commit_timeout
= conf
->filestore_commit_timeout
;
6134 if (changed
.count("filestore_dump_file")) {
6135 if (conf
->filestore_dump_file
.length() &&
6136 conf
->filestore_dump_file
!= "-") {
6137 dump_start(conf
->filestore_dump_file
);
6144 int FileStore::set_throttle_params()
6147 bool valid
= throttle_bytes
.set_params(
6148 cct
->_conf
->filestore_queue_low_threshhold
,
6149 cct
->_conf
->filestore_queue_high_threshhold
,
6150 cct
->_conf
->filestore_expected_throughput_bytes
,
6151 cct
->_conf
->filestore_queue_high_delay_multiple
?
6152 cct
->_conf
->filestore_queue_high_delay_multiple
:
6153 cct
->_conf
->filestore_queue_high_delay_multiple_bytes
,
6154 cct
->_conf
->filestore_queue_max_delay_multiple
?
6155 cct
->_conf
->filestore_queue_max_delay_multiple
:
6156 cct
->_conf
->filestore_queue_max_delay_multiple_bytes
,
6157 cct
->_conf
->filestore_queue_max_bytes
,
6160 valid
&= throttle_ops
.set_params(
6161 cct
->_conf
->filestore_queue_low_threshhold
,
6162 cct
->_conf
->filestore_queue_high_threshhold
,
6163 cct
->_conf
->filestore_expected_throughput_ops
,
6164 cct
->_conf
->filestore_queue_high_delay_multiple
?
6165 cct
->_conf
->filestore_queue_high_delay_multiple
:
6166 cct
->_conf
->filestore_queue_high_delay_multiple_ops
,
6167 cct
->_conf
->filestore_queue_max_delay_multiple
?
6168 cct
->_conf
->filestore_queue_max_delay_multiple
:
6169 cct
->_conf
->filestore_queue_max_delay_multiple_ops
,
6170 cct
->_conf
->filestore_queue_max_ops
,
6173 logger
->set(l_filestore_op_queue_max_ops
, throttle_ops
.get_max());
6174 logger
->set(l_filestore_op_queue_max_bytes
, throttle_bytes
.get_max());
6177 derr
<< "tried to set invalid params: "
6181 return valid
? 0 : -EINVAL
;
6184 void FileStore::dump_start(const std::string
& file
)
6186 dout(10) << __FUNC__
<< ": " << file
<< dendl
;
6187 if (m_filestore_do_dump
) {
6190 m_filestore_dump_fmt
.reset();
6191 m_filestore_dump_fmt
.open_array_section("dump");
6192 m_filestore_dump
.open(file
.c_str());
6193 m_filestore_do_dump
= true;
6196 void FileStore::dump_stop()
6198 dout(10) << __FUNC__
<< dendl
;
6199 m_filestore_do_dump
= false;
6200 if (m_filestore_dump
.is_open()) {
6201 m_filestore_dump_fmt
.close_section();
6202 m_filestore_dump_fmt
.flush(m_filestore_dump
);
6203 m_filestore_dump
.flush();
6204 m_filestore_dump
.close();
6208 void FileStore::dump_transactions(vector
<ObjectStore::Transaction
>& ls
, uint64_t seq
, OpSequencer
*osr
)
6210 m_filestore_dump_fmt
.open_array_section("transactions");
6211 unsigned trans_num
= 0;
6212 for (vector
<ObjectStore::Transaction
>::iterator i
= ls
.begin(); i
!= ls
.end(); ++i
, ++trans_num
) {
6213 m_filestore_dump_fmt
.open_object_section("transaction");
6214 m_filestore_dump_fmt
.dump_stream("osr") << osr
->cid
;
6215 m_filestore_dump_fmt
.dump_unsigned("seq", seq
);
6216 m_filestore_dump_fmt
.dump_unsigned("trans_num", trans_num
);
6217 (*i
).dump(&m_filestore_dump_fmt
);
6218 m_filestore_dump_fmt
.close_section();
6220 m_filestore_dump_fmt
.close_section();
6221 m_filestore_dump_fmt
.flush(m_filestore_dump
);
6222 m_filestore_dump
.flush();
6225 void FileStore::get_db_statistics(Formatter
* f
)
6227 object_map
->db
->get_statistics(f
);
6230 void FileStore::set_xattr_limits_via_conf()
6232 uint32_t fs_xattr_size
;
6234 uint32_t fs_xattr_max_value_size
;
6236 switch (m_fs_type
) {
6237 #if defined(__linux__)
6238 case XFS_SUPER_MAGIC
:
6239 fs_xattr_size
= cct
->_conf
->filestore_max_inline_xattr_size_xfs
;
6240 fs_xattrs
= cct
->_conf
->filestore_max_inline_xattrs_xfs
;
6241 fs_xattr_max_value_size
= cct
->_conf
->filestore_max_xattr_value_size_xfs
;
6243 case BTRFS_SUPER_MAGIC
:
6244 fs_xattr_size
= cct
->_conf
->filestore_max_inline_xattr_size_btrfs
;
6245 fs_xattrs
= cct
->_conf
->filestore_max_inline_xattrs_btrfs
;
6246 fs_xattr_max_value_size
= cct
->_conf
->filestore_max_xattr_value_size_btrfs
;
6250 fs_xattr_size
= cct
->_conf
->filestore_max_inline_xattr_size_other
;
6251 fs_xattrs
= cct
->_conf
->filestore_max_inline_xattrs_other
;
6252 fs_xattr_max_value_size
= cct
->_conf
->filestore_max_xattr_value_size_other
;
6256 // Use override value if set
6257 if (cct
->_conf
->filestore_max_inline_xattr_size
)
6258 m_filestore_max_inline_xattr_size
= cct
->_conf
->filestore_max_inline_xattr_size
;
6260 m_filestore_max_inline_xattr_size
= fs_xattr_size
;
6262 // Use override value if set
6263 if (cct
->_conf
->filestore_max_inline_xattrs
)
6264 m_filestore_max_inline_xattrs
= cct
->_conf
->filestore_max_inline_xattrs
;
6266 m_filestore_max_inline_xattrs
= fs_xattrs
;
6268 // Use override value if set
6269 if (cct
->_conf
->filestore_max_xattr_value_size
)
6270 m_filestore_max_xattr_value_size
= cct
->_conf
->filestore_max_xattr_value_size
;
6272 m_filestore_max_xattr_value_size
= fs_xattr_max_value_size
;
6274 if (m_filestore_max_xattr_value_size
< cct
->_conf
->osd_max_object_name_len
) {
6275 derr
<< "WARNING: max attr value size ("
6276 << m_filestore_max_xattr_value_size
6277 << ") is smaller than osd_max_object_name_len ("
6278 << cct
->_conf
->osd_max_object_name_len
6279 << "). Your backend filesystem appears to not support attrs large "
6280 << "enough to handle the configured max rados name size. You may get "
6281 << "unexpected ENAMETOOLONG errors on rados operations or buggy "
6287 uint64_t FileStore::estimate_objects_overhead(uint64_t num_objects
)
6289 uint64_t res
= num_objects
* blk_size
/ 2; //assumes that each object uses ( in average ) additional 1/2 block due to FS allocation granularity.
6293 int FileStore::apply_layout_settings(const coll_t
&cid
, int target_level
)
6295 dout(20) << __FUNC__
<< ": " << cid
<< " target level: "
6296 << target_level
<< dendl
;
6298 int r
= get_index(cid
, &index
);
6300 dout(10) << "Error getting index for " << cid
<< ": " << cpp_strerror(r
)
6305 return index
->apply_layout_settings(target_level
);
6309 // -- FSSuperblock --
6311 void FSSuperblock::encode(bufferlist
&bl
) const
6313 ENCODE_START(2, 1, bl
);
6314 compat_features
.encode(bl
);
6315 encode(omap_backend
, bl
);
6319 void FSSuperblock::decode(bufferlist::const_iterator
&bl
)
6321 DECODE_START(2, bl
);
6322 compat_features
.decode(bl
);
6324 decode(omap_backend
, bl
);
6326 omap_backend
= "leveldb";
6330 void FSSuperblock::dump(Formatter
*f
) const
6332 f
->open_object_section("compat");
6333 compat_features
.dump(f
);
6334 f
->dump_string("omap_backend", omap_backend
);
6338 void FSSuperblock::generate_test_instances(list
<FSSuperblock
*>& o
)
6341 o
.push_back(new FSSuperblock(z
));
6342 CompatSet::FeatureSet feature_compat
;
6343 CompatSet::FeatureSet feature_ro_compat
;
6344 CompatSet::FeatureSet feature_incompat
;
6345 feature_incompat
.insert(CEPH_FS_FEATURE_INCOMPAT_SHARDS
);
6346 z
.compat_features
= CompatSet(feature_compat
, feature_ro_compat
,
6348 o
.push_back(new FSSuperblock(z
));
6349 z
.omap_backend
= "rocksdb";
6350 o
.push_back(new FSSuperblock(z
));
6354 #define dout_prefix *_dout << "filestore.osr(" << this << ") "
6356 void FileStore::OpSequencer::_register_apply(Op
*o
)
6358 if (o
->registered_apply
) {
6359 dout(20) << __func__
<< " " << o
<< " already registered" << dendl
;
6362 o
->registered_apply
= true;
6363 for (auto& t
: o
->tls
) {
6364 for (auto& i
: t
.get_object_index()) {
6365 uint32_t key
= i
.first
.hobj
.get_hash();
6366 applying
.emplace(make_pair(key
, &i
.first
));
6367 dout(20) << __func__
<< " " << o
<< " " << i
.first
<< " ("
6368 << &i
.first
<< ")" << dendl
;
6373 void FileStore::OpSequencer::_unregister_apply(Op
*o
)
6375 ceph_assert(o
->registered_apply
);
6376 for (auto& t
: o
->tls
) {
6377 for (auto& i
: t
.get_object_index()) {
6378 uint32_t key
= i
.first
.hobj
.get_hash();
6379 auto p
= applying
.find(key
);
6380 bool removed
= false;
6381 while (p
!= applying
.end() &&
6383 if (p
->second
== &i
.first
) {
6384 dout(20) << __func__
<< " " << o
<< " " << i
.first
<< " ("
6385 << &i
.first
<< ")" << dendl
;
6392 ceph_assert(removed
);
6397 void FileStore::OpSequencer::wait_for_apply(const ghobject_t
& oid
)
6399 std::unique_lock l
{qlock
};
6400 uint32_t key
= oid
.hobj
.get_hash();
6403 // search all items in hash slot for a matching object
6404 auto p
= applying
.find(key
);
6405 while (p
!= applying
.end() &&
6407 if (*p
->second
== oid
) {
6408 dout(20) << __func__
<< " " << oid
<< " waiting on " << p
->second
6417 dout(20) << __func__
<< " " << oid
<< " done" << dendl
;