// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 */

#ifndef CEPH_FILESTORE_H
#define CEPH_FILESTORE_H

#include "include/types.h"

#include <boost/scoped_ptr.hpp>

#include "include/unordered_map.h"
#include "include/assert.h"

#include "os/ObjectStore.h"
#include "JournalingObjectStore.h"

#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "common/perf_counters.h"
#include "common/zipkin_trace.h"
#include "common/Mutex.h"

#include "HashIndex.h"
#include "IndexManager.h"
#include "os/ObjectMap.h"
#include "SequencerPosition.h"
#include "WBThrottle.h"

#include "include/uuid.h"
// from include/linux/falloc.h:
#ifndef FALLOC_FL_PUNCH_HOLE
# define FALLOC_FL_PUNCH_HOLE 0x2
#endif

#if defined(__linux__)
# ifndef BTRFS_SUPER_MAGIC
#  define BTRFS_SUPER_MAGIC 0x9123683EL
# endif
# ifndef XFS_SUPER_MAGIC
#  define XFS_SUPER_MAGIC 0x58465342L
# endif
# ifndef ZFS_SUPER_MAGIC
#  define ZFS_SUPER_MAGIC 0x2fc12fc1L
# endif
#endif
class FileStoreBackend;

#define CEPH_FS_FEATURE_INCOMPAT_SHARDS CompatSet::Feature(1, "sharded objects")
enum {
  l_filestore_first = 84000,
  l_filestore_journal_queue_ops,
  l_filestore_journal_queue_bytes,
  l_filestore_journal_ops,
  l_filestore_journal_bytes,
  l_filestore_journal_latency,
  l_filestore_journal_wr,
  l_filestore_journal_wr_bytes,
  l_filestore_journal_full,
  l_filestore_committing,
  l_filestore_commitcycle,
  l_filestore_commitcycle_interval,
  l_filestore_commitcycle_latency,
  l_filestore_op_queue_max_ops,
  l_filestore_op_queue_ops,
  l_filestore_op_queue_max_bytes,
  l_filestore_op_queue_bytes,
  l_filestore_apply_latency,
  l_filestore_queue_transaction_latency_avg,
  l_filestore_sync_pause_max_lat,
  l_filestore_last,
};
class FSSuperblock {
public:
  CompatSet compat_features;

  void encode(bufferlist &bl) const;
  void decode(bufferlist::iterator &bl);
  void dump(Formatter *f) const;
  static void generate_test_instances(list<FSSuperblock*>& o);
};
WRITE_CLASS_ENCODER(FSSuperblock)

inline ostream& operator<<(ostream& out, const FSSuperblock& sb)
{
  return out << "sb(" << sb.compat_features << ")";
}
class FileStore : public JournalingObjectStore,
                  public md_config_obs_t
{
public:
  static const uint32_t target_version = 4;

  uint32_t get_target_version() {
    return target_version;
  }

  static int get_block_device_fsid(CephContext* cct, const string& path,
                                   uuid_d *fsid);
  struct FSPerfTracker {
    PerfCounters::avg_tracker<uint64_t> os_commit_latency;
    PerfCounters::avg_tracker<uint64_t> os_apply_latency;

    objectstore_perf_stat_t get_cur_stats() const {
      objectstore_perf_stat_t ret;
      ret.os_commit_latency = os_commit_latency.current_avg();
      ret.os_apply_latency = os_apply_latency.current_avg();
      return ret;
    }

    void update_from_perfcounters(PerfCounters &logger);
  } perf_tracker;

  objectstore_perf_stat_t get_cur_stats() override {
    perf_tracker.update_from_perfcounters(*logger);
    return perf_tracker.get_cur_stats();
  }

  const PerfCounters* get_perf_counters() const override {
    return logger;
  }

private:
  string internal_name;  ///< internal name, used to name the perfcounter instance
  string basedir, journalpath;
  osflagbits_t generic_flags;
  std::string current_fn;
  std::string current_op_seq_fn;
  std::string omap_dir;
  uuid_d fsid;

  size_t blk_size;       ///< fs block size

  int fsid_fd, op_fd, basedir_fd, current_fd;

  FileStoreBackend *backend;

  void create_backend(long f_type);

  deque<uint64_t> snaps;
  // Indexed Collections
  IndexManager index_manager;
  int get_index(const coll_t& c, Index *index);
  int init_index(const coll_t& c);

  bool _need_temp_object_collection(const coll_t& cid, const ghobject_t& oid) {
    // - normal temp case: cid is pg, object is temp (pool < -1)
    // - hammer temp case: cid is pg (or already temp), object pool is -1
    return cid.is_pg() && oid.hobj.pool <= -1;
  }
  void init_temp_collections();
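
  // Example (hypothetical values, assuming the usual "-2 - pool" temp
  // mapping): for an object in pool 1, a new-style temp object carries
  // hobj.pool == -3 (< -1), while a hammer-era temp object carries
  // hobj.pool == -1; both satisfy the test above.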
  boost::scoped_ptr<ObjectMap> object_map;

  int get_cdir(const coll_t& cid, char *s, int len);

  /// read a uuid from fd
  int read_fsid(int fd, uuid_d *uuid);

  Mutex sync_entry_timeo_lock;

  list<Context*> sync_waiters;

  void sync_entry();
  struct SyncThread : public Thread {
    FileStore *fs;
    explicit SyncThread(FileStore *f) : fs(f) {}
    void *entry() override {
      fs->sync_entry();
      return 0;
    }
  } sync_thread;
  // -- op workqueue --
  struct Op {
    utime_t start;
    uint64_t op;
    vector<Transaction> tls;
    Context *onreadable, *onreadable_sync;
    uint64_t ops, bytes;
    TrackedOpRef osd_op;
    ZTracer::Trace trace;
  };
  class OpSequencer : public Sequencer_impl {
    Mutex qlock; // to protect q, for benefit of flush (peek/dequeue also protected by lock)
    list<Op*> q;
    list<uint64_t> jq;
    list<pair<uint64_t, Context*> > flush_commit_waiters;
    Cond cond;
  public:
    Sequencer *parent;
    Mutex apply_lock;  // for apply mutual exclusion
    int id;
    /// get_max_uncompleted
    bool _get_max_uncompleted(
      uint64_t *seq ///< [out] max uncompleted seq
      ) {
      assert(qlock.is_locked());
      *seq = 0;
      if (q.empty() && jq.empty())
        return true;

      if (!q.empty())
        *seq = q.back()->op;
      if (!jq.empty() && jq.back() > *seq)
        *seq = jq.back();

      return false;
    } /// @returns true if both queues are empty
    /// get_min_uncompleted
    bool _get_min_uncompleted(
      uint64_t *seq ///< [out] min uncompleted seq
      ) {
      assert(qlock.is_locked());
      *seq = 0;
      if (q.empty() && jq.empty())
        return true;

      if (!q.empty())
        *seq = q.front()->op;
      if (!jq.empty() && jq.front() < *seq)
        *seq = jq.front();

      return false;
    } /// @returns true if both queues are empty
    void _wake_flush_waiters(list<Context*> *to_queue) {
      uint64_t seq;
      if (_get_min_uncompleted(&seq))
        seq = -1;

      // erase-while-iterating: erase(i++) advances before invalidation
      for (list<pair<uint64_t, Context*> >::iterator i =
             flush_commit_waiters.begin();
           i != flush_commit_waiters.end() && i->first < seq;
           flush_commit_waiters.erase(i++)) {
        to_queue->push_back(i->second);
      }
    }
    void queue_journal(uint64_t s) {
      Mutex::Locker l(qlock);
      jq.push_back(s);
    }
    void dequeue_journal(list<Context*> *to_queue) {
      Mutex::Locker l(qlock);
      jq.pop_front();
      cond.Signal();
      _wake_flush_waiters(to_queue);
    }
    void queue(Op *o) {
      Mutex::Locker l(qlock);
      q.push_back(o);
      o->trace.keyval("queue depth", q.size());
    }
    Op *peek_queue() {
      Mutex::Locker l(qlock);
      assert(apply_lock.is_locked());
      return q.front();
    }
    Op *dequeue(list<Context*> *to_queue) {
      assert(to_queue);
      assert(apply_lock.is_locked());
      Mutex::Locker l(qlock);
      Op *o = q.front();
      q.pop_front();
      cond.Signal();

      _wake_flush_waiters(to_queue);
      return o;
    }
    void flush() override {
      Mutex::Locker l(qlock);

      while (cct->_conf->filestore_blackhole)
        cond.Wait(qlock);  // wait forever

      // get max for journal _or_ op queues
      uint64_t seq = 0;
      if (!q.empty())
        seq = q.back()->op;
      if (!jq.empty() && jq.back() > seq)
        seq = jq.back();

      if (seq) {
        // everything prior to our watermark to drain through either/both queues
        while ((!q.empty() && q.front()->op <= seq) ||
               (!jq.empty() && jq.front() <= seq))
          cond.Wait(qlock);
      }
    }
    bool flush_commit(Context *c) override {
      Mutex::Locker l(qlock);
      uint64_t seq = 0;
      if (_get_max_uncompleted(&seq)) {
        return true;
      } else {
        flush_commit_waiters.push_back(make_pair(seq, c));
        return false;
      }
    }
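
    // Usage sketch (illustrative): a caller that must wait for everything
    // queued so far to commit can do
    //
    //   C_SaferCond waiter;
    //   if (!osr->flush_commit(&waiter))
    //     waiter.wait();   // fires once the max uncompleted seq commits
    //
    // When flush_commit() returns true both queues were already drained;
    // the Context is not queued and the caller remains its owner.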
    OpSequencer(CephContext* cct, int i)
      : Sequencer_impl(cct),
        qlock("FileStore::OpSequencer::qlock", false, false),
        parent(0),
        apply_lock("FileStore::OpSequencer::apply_lock", false, false),
        id(i) {}
    ~OpSequencer() override {
      assert(q.empty());
    }

    const string& get_name() const {
      return parent->get_name();
    }
  };
  friend ostream& operator<<(ostream& out, const OpSequencer& s);

  WBThrottle wbthrottle;

  std::atomic<int64_t> next_osr_id = { 0 };
  bool m_disable_wbthrottle;
  deque<OpSequencer*> op_queue;
  BackoffThrottle throttle_ops, throttle_bytes;
  const int m_ondisk_finisher_num;
  const int m_apply_finisher_num;
  vector<Finisher*> ondisk_finishers;
  vector<Finisher*> apply_finishers;
  struct OpWQ : public ThreadPool::WorkQueue<OpSequencer> {
    FileStore *store;
    OpWQ(FileStore *fs, time_t timeout, time_t suicide_timeout, ThreadPool *tp)
      : ThreadPool::WorkQueue<OpSequencer>("FileStore::OpWQ", timeout, suicide_timeout, tp), store(fs) {}

    bool _enqueue(OpSequencer *osr) override {
      store->op_queue.push_back(osr);
      return true;
    }
    void _dequeue(OpSequencer *o) override {
      ceph_abort();
    }
    bool _empty() override {
      return store->op_queue.empty();
    }
    OpSequencer *_dequeue() override {
      if (store->op_queue.empty())
        return NULL;
      OpSequencer *osr = store->op_queue.front();
      store->op_queue.pop_front();
      return osr;
    }
    void _process(OpSequencer *osr, ThreadPool::TPHandle &handle) override {
      store->_do_op(osr, handle);
    }
    void _process_finish(OpSequencer *osr) override {
      store->_finish_op(osr);
    }
    void _clear() override {
      assert(store->op_queue.empty());
    }
  } op_wq;
  void _do_op(OpSequencer *o, ThreadPool::TPHandle &handle);
  void _finish_op(OpSequencer *o);
  Op *build_op(vector<Transaction>& tls,
               Context *onreadable, Context *onreadable_sync,
               TrackedOpRef osd_op);
  void queue_op(OpSequencer *osr, Op *o);
  void op_queue_reserve_throttle(Op *o);
  void op_queue_release_throttle(Op *o);
  void _journaled_ahead(OpSequencer *osr, Op *o, Context *ondisk);
  friend struct C_JournaledAhead;

  PerfCounters *logger;

  ZTracer::Endpoint trace_endpoint;
  int lfn_find(const ghobject_t& oid, const Index& index,
               IndexedPath *path = NULL);
  int lfn_truncate(const coll_t& cid, const ghobject_t& oid, off_t length);
  int lfn_stat(const coll_t& cid, const ghobject_t& oid, struct stat *buf);
  int lfn_open(
    const coll_t& cid,
    const ghobject_t& oid,
    bool create,
    FDRef *outfd,
    Index *index = 0);

  void lfn_close(FDRef fd);
  int lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& o, const ghobject_t& newoid);
  int lfn_unlink(const coll_t& cid, const ghobject_t& o, const SequencerPosition &spos,
                 bool force_clear_omap=false);
public:
  FileStore(CephContext* cct, const std::string &base, const std::string &jdev,
            osflagbits_t flags = 0,
            const char *internal_name = "filestore", bool update_to=false);
  ~FileStore() override;

  string get_type() override {
    return "filestore";
  }

  int _sanity_check_fs();
  bool test_mount_in_use() override;
  int read_op_seq(uint64_t *seq);
  int write_op_seq(int, uint64_t seq);
  int mount() override;
  int umount() override;

  int validate_hobject_key(const hobject_t &obj) const override;

  unsigned get_max_attr_name_length() override {
    // xattr limit is 128; leave room for our prefixes (user.ceph._),
    // some margin, and cap at 100
    return 100;
  }
  int mkjournal() override;
  bool wants_journal() override {
    return true;
  }
  bool allows_journal() override {
    return true;
  }
  bool needs_journal() override {
    return false;
  }

  bool is_rotational() override;

  void dump_perf_counters(Formatter *f) override {
    f->open_object_section("perf_counters");
    logger->dump_formatted(f, false);
    f->close_section();
  }

  int write_version_stamp();
  int version_stamp_is_valid(uint32_t *version);
  int update_version_stamp();
  int upgrade() override;

  bool can_sort_nibblewise() override {
    return true;    // i support legacy sort order
  }
  void collect_metadata(map<string,string> *pm) override;

  int statfs(struct store_statfs_t *buf) override;

  int _do_transactions(
    vector<Transaction> &tls, uint64_t op_seq,
    ThreadPool::TPHandle *handle);
  int do_transactions(vector<Transaction> &tls, uint64_t op_seq) override {
    return _do_transactions(tls, op_seq, 0);
  }
  void _do_transaction(
    Transaction& t, uint64_t op_seq, int trans_num,
    ThreadPool::TPHandle *handle);

  int queue_transactions(Sequencer *osr, vector<Transaction>& tls,
                         TrackedOpRef op = TrackedOpRef(),
                         ThreadPool::TPHandle *handle = NULL) override;
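
  // Usage sketch (names assumed for illustration): callers build an
  // ObjectStore::Transaction and submit it through a Sequencer so that
  // operations on the same sequencer apply in order:
  //
  //   ObjectStore::Sequencer osr("example");
  //   vector<ObjectStore::Transaction> tls(1);
  //   tls[0].write(cid, oid, 0, bl.length(), bl);
  //   fs->queue_transactions(&osr, tls);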
  /**
   * set replay guard xattr on given file
   *
   * This will ensure that we will not replay this (or any previous) operation
   * against this particular inode/object.
   *
   * @param fd open file descriptor for the file/object
   * @param spos sequencer position of the last operation we should not replay
   */
  void _set_replay_guard(int fd,
                         const SequencerPosition& spos,
                         const ghobject_t *oid=0,
                         bool in_progress=false);
  void _set_replay_guard(const coll_t& cid,
                         const SequencerPosition& spos,
                         bool in_progress);
  void _set_global_replay_guard(const coll_t& cid,
                                const SequencerPosition &spos);

  /// close a replay guard opened with in_progress=true
  void _close_replay_guard(int fd, const SequencerPosition& spos,
                           const ghobject_t *oid=0);
  void _close_replay_guard(const coll_t& cid, const SequencerPosition& spos);
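
  // Sketch of the intended call pattern (illustrative): a non-idempotent
  // operation brackets its work with the guard,
  //
  //   _set_replay_guard(fd, spos, &oid, true);  // mark op in progress
  //   // ... apply the non-idempotent updates ...
  //   _close_replay_guard(fd, spos);            // mark op fully applied
  //
  // so that on journal replay the recorded position can be compared with
  // the incoming one to decide whether the operation must be skipped.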
  /**
   * check replay guard xattr on given file
   *
   * Check the current position against any marker on the file that
   * indicates which operations have already been applied. If the
   * current or a newer operation has been marked as applied, we
   * should not replay the current operation again.
   *
   * If we are not replaying the journal, we always return true. It
   * is only on replay that we might return false, indicating that the
   * operation should not be performed (again).
   *
   * @param fd open fd on the file/object in question
   * @param spos sequencer position for an operation we could apply/replay
   * @return 1 if we can apply (maybe replay) this operation, -1 if spos has already been applied, 0 if it was in progress
   */
  int _check_replay_guard(int fd, const SequencerPosition& spos);
  int _check_replay_guard(const coll_t& cid, const SequencerPosition& spos);
  int _check_replay_guard(const coll_t& cid, const ghobject_t &oid, const SequencerPosition& pos);
  int _check_global_replay_guard(const coll_t& cid, const SequencerPosition& spos);
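
  // Interpreting the return value (sketch, following the doc above):
  //
  //   int r = _check_replay_guard(fd, spos);
  //   if (r > 0)  { /* safe to apply (maybe replay) the op */ }
  //   if (r == 0) { /* op was interrupted mid-apply; proceed carefully */ }
  //   if (r < 0)  { /* spos already applied; skip the op on replay */ }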
  // ------------------
  // objects
  int pick_object_revision_lt(ghobject_t& oid) {
    return 0;
  }
  using ObjectStore::exists;
  bool exists(const coll_t& cid, const ghobject_t& oid) override;
  using ObjectStore::stat;
  int stat(
    const coll_t& cid,
    const ghobject_t& oid,
    struct stat *st,
    bool allow_eio = false) override;
  using ObjectStore::set_collection_opts;
  int set_collection_opts(
    const coll_t& cid,
    const pool_opts_t& opts) override;
  using ObjectStore::read;
  int read(
    const coll_t& cid,
    const ghobject_t& oid,
    uint64_t offset,
    size_t len,
    bufferlist& bl,
    uint32_t op_flags = 0) override;
  int _do_fiemap(int fd, uint64_t offset, size_t len,
                 map<uint64_t, uint64_t> *m);
  int _do_seek_hole_data(int fd, uint64_t offset, size_t len,
                         map<uint64_t, uint64_t> *m);
  using ObjectStore::fiemap;
  int fiemap(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len, bufferlist& bl) override;
  int fiemap(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len, map<uint64_t, uint64_t>& destmap) override;
  int _touch(const coll_t& cid, const ghobject_t& oid);
  int _write(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len,
             const bufferlist& bl, uint32_t fadvise_flags = 0);
  int _zero(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len);
  int _truncate(const coll_t& cid, const ghobject_t& oid, uint64_t size);
  int _clone(const coll_t& cid, const ghobject_t& oldoid, const ghobject_t& newoid,
             const SequencerPosition& spos);
  int _clone_range(const coll_t& oldcid, const ghobject_t& oldoid, const coll_t& newcid, const ghobject_t& newoid,
                   uint64_t srcoff, uint64_t len, uint64_t dstoff,
                   const SequencerPosition& spos);
  int _do_clone_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff);
  int _do_sparse_copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff);
  int _do_copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff, bool skip_sloppycrc=false);
  int _remove(const coll_t& cid, const ghobject_t& oid, const SequencerPosition &spos);

  int _fgetattr(int fd, const char *name, bufferptr& bp);
  int _fgetattrs(int fd, map<string,bufferptr>& aset);
  int _fsetattrs(int fd, map<string, bufferptr> &aset);
  void do_force_sync();
  void start_sync(Context *onsafe);
  void _flush_op_queue();
  void sync_and_flush();

  int flush_journal() override;
  int dump_journal(ostream& out) override;

  void set_fsid(uuid_d u) override {
    fsid = u;
  }
  uuid_d get_fsid() override { return fsid; }

  uint64_t estimate_objects_overhead(uint64_t num_objects) override;
  // DEBUG read error injection, an object is removed from both on delete()
  Mutex read_error_lock;
  set<ghobject_t> data_error_set;  // read() will return -EIO
  set<ghobject_t> mdata_error_set; // getattr(),stat() will return -EIO
  void inject_data_error(const ghobject_t &oid) override;
  void inject_mdata_error(const ghobject_t &oid) override;

  void compact() override {
    assert(object_map);
    object_map->compact();
  }

  void debug_obj_on_delete(const ghobject_t &oid);
  bool debug_data_eio(const ghobject_t &oid);
  bool debug_mdata_eio(const ghobject_t &oid);
  int snapshot(const string& name) override;
  // attrs
  using ObjectStore::getattr;
  using ObjectStore::getattrs;
  int getattr(const coll_t& cid, const ghobject_t& oid, const char *name, bufferptr &bp) override;
  int getattrs(const coll_t& cid, const ghobject_t& oid, map<string,bufferptr>& aset) override;

  int _setattrs(const coll_t& cid, const ghobject_t& oid, map<string,bufferptr>& aset,
                const SequencerPosition &spos);
  int _rmattr(const coll_t& cid, const ghobject_t& oid, const char *name,
              const SequencerPosition &spos);
  int _rmattrs(const coll_t& cid, const ghobject_t& oid,
               const SequencerPosition &spos);

  int _collection_remove_recursive(const coll_t &cid,
                                   const SequencerPosition &spos);

  int _collection_set_bits(const coll_t& cid, int bits);
  // collections
  using ObjectStore::collection_list;
  int collection_bits(const coll_t& c) override;
  int collection_list(const coll_t& c,
                      const ghobject_t& start, const ghobject_t& end, int max,
                      vector<ghobject_t> *ls, ghobject_t *next) override;
  int list_collections(vector<coll_t>& ls) override;
  int list_collections(vector<coll_t>& ls, bool include_temp);
  int collection_stat(const coll_t& c, struct stat *st);
  bool collection_exists(const coll_t& c) override;
  int collection_empty(const coll_t& c, bool *empty) override;
  // omap (see ObjectStore.h for documentation)
  using ObjectStore::omap_get;
  int omap_get(const coll_t& c, const ghobject_t &oid, bufferlist *header,
               map<string, bufferlist> *out) override;
  using ObjectStore::omap_get_header;
  int omap_get_header(
    const coll_t& c,
    const ghobject_t &oid,
    bufferlist *out,
    bool allow_eio = false) override;
  using ObjectStore::omap_get_keys;
  int omap_get_keys(const coll_t& c, const ghobject_t &oid, set<string> *keys) override;
  using ObjectStore::omap_get_values;
  int omap_get_values(const coll_t& c, const ghobject_t &oid, const set<string> &keys,
                      map<string, bufferlist> *out) override;
  using ObjectStore::omap_check_keys;
  int omap_check_keys(const coll_t& c, const ghobject_t &oid, const set<string> &keys,
                      set<string> *out) override;
  using ObjectStore::get_omap_iterator;
  ObjectMap::ObjectMapIterator get_omap_iterator(const coll_t& c, const ghobject_t &oid) override;
  int _create_collection(const coll_t& c, int bits,
                         const SequencerPosition &spos);
  int _destroy_collection(const coll_t& c);
  /**
   * Give an expected number of objects hint to the collection.
   *
   * @param c - collection id.
   * @param pg_num - pg number of the pool this collection belongs to
   * @param expected_num_objs - expected number of objects in this collection
   * @param spos - sequence position
   *
   * @return 0 on success, an error code otherwise
   */
  int _collection_hint_expected_num_objs(const coll_t& c, uint32_t pg_num,
                                         uint64_t expected_num_objs,
                                         const SequencerPosition &spos);
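
  // Why the hint matters (sketch): given an expected object count, the
  // underlying HashIndex can pre-split its directory tree up front instead
  // of splitting incrementally (and expensively) while objects accumulate.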
  int _collection_add(const coll_t& c, const coll_t& ocid, const ghobject_t& oid,
                      const SequencerPosition& spos);
  int _collection_move_rename(const coll_t& oldcid, const ghobject_t& oldoid,
                              coll_t c, const ghobject_t& o,
                              const SequencerPosition& spos,
                              bool ignore_enoent = false);

  int _set_alloc_hint(const coll_t& cid, const ghobject_t& oid,
                      uint64_t expected_object_size,
                      uint64_t expected_write_size);
  void dump_start(const std::string& file);
  void dump_transactions(vector<Transaction>& ls, uint64_t seq, OpSequencer *osr);

  virtual int apply_layout_settings(const coll_t &cid);

private:
  void _inject_failure();
  // omap
  int _omap_clear(const coll_t& cid, const ghobject_t &oid,
                  const SequencerPosition &spos);
  int _omap_setkeys(const coll_t& cid, const ghobject_t &oid,
                    const map<string, bufferlist> &aset,
                    const SequencerPosition &spos);
  int _omap_rmkeys(const coll_t& cid, const ghobject_t &oid, const set<string> &keys,
                   const SequencerPosition &spos);
  int _omap_rmkeyrange(const coll_t& cid, const ghobject_t &oid,
                       const string& first, const string& last,
                       const SequencerPosition &spos);
  int _omap_setheader(const coll_t& cid, const ghobject_t &oid, const bufferlist &bl,
                      const SequencerPosition &spos);
  int _split_collection(const coll_t& cid, uint32_t bits, uint32_t rem, coll_t dest,
                        const SequencerPosition &spos);
  int _split_collection_create(const coll_t& cid, uint32_t bits, uint32_t rem,
                               coll_t dest,
                               const SequencerPosition &spos);
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const struct md_config_t *conf,
                          const std::set<std::string> &changed) override;
  int set_throttle_params();
  float m_filestore_commit_timeout;
  bool m_filestore_journal_parallel;
  bool m_filestore_journal_trailing;
  bool m_filestore_journal_writeahead;
  int m_filestore_fiemap_threshold;
  double m_filestore_max_sync_interval;
  double m_filestore_min_sync_interval;
  bool m_filestore_fail_eio;
  bool m_filestore_fadvise;
  bool m_journal_dio, m_journal_aio, m_journal_force_aio;
  std::string m_osd_rollback_to_cluster_snap;
  bool m_osd_use_stale_snap;
  bool m_filestore_do_dump;
  std::ofstream m_filestore_dump;
  JSONFormatter m_filestore_dump_fmt;
  std::atomic<int64_t> m_filestore_kill_at = { 0 };
  bool m_filestore_sloppy_crc;
  int m_filestore_sloppy_crc_block_size;
  uint64_t m_filestore_max_alloc_hint_size;
  // Determine xattr handling based on fs type
  void set_xattr_limits_via_conf();
  uint32_t m_filestore_max_inline_xattr_size;
  uint32_t m_filestore_max_inline_xattrs;
  uint32_t m_filestore_max_xattr_value_size;

  FSSuperblock superblock;
  /**
   * Write superblock to persistent storage
   *
   * return value: 0 on success, otherwise negative errno
   */
  int write_superblock();

  /**
   * Fill in FileStore::superblock by reading persistent storage
   *
   * return value: 0 on success, otherwise negative errno
   */
  int read_superblock();
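
  // Typical mount-time flow (illustrative):
  //
  //   int r = read_superblock();
  //   if (r < 0)
  //     return r;                  // superblock missing or unreadable
  //   // superblock.compat_features is then checked against the features
  //   // this binary supports before the mount proceeds.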
  friend class FileStoreBackend;
  friend class TestFileStore;
};

ostream& operator<<(ostream& out, const FileStore::OpSequencer& s);
class FileStoreBackend {
private:
  FileStore *filestore;
protected:
  int get_basedir_fd() {
    return filestore->basedir_fd;
  }
  int get_current_fd() {
    return filestore->current_fd;
  }
  int get_op_fd() {
    return filestore->op_fd;
  }
  size_t get_blksize() {
    return filestore->blk_size;
  }
  const string& get_basedir_path() {
    return filestore->basedir;
  }
  const string& get_current_path() {
    return filestore->current_fn;
  }
  int _copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff) {
    if (has_fiemap() || has_seek_data_hole()) {
      return filestore->_do_sparse_copy_range(from, to, srcoff, len, dstoff);
    }
    return filestore->_do_copy_range(from, to, srcoff, len, dstoff);
  }
  int get_crc_block_size() {
    return filestore->m_filestore_sloppy_crc_block_size;
  }

public:
  explicit FileStoreBackend(FileStore *fs) : filestore(fs) {}
  virtual ~FileStoreBackend() {}

  CephContext* cct() const {
    return filestore->cct;
  }
*create(long f_type
, FileStore
*fs
);
864 virtual const char *get_name() = 0;
865 virtual int detect_features() = 0;
866 virtual int create_current() = 0;
867 virtual bool can_checkpoint() = 0;
868 virtual int list_checkpoints(list
<string
>& ls
) = 0;
869 virtual int create_checkpoint(const string
& name
, uint64_t *cid
) = 0;
870 virtual int sync_checkpoint(uint64_t id
) = 0;
871 virtual int rollback_to(const string
& name
) = 0;
872 virtual int destroy_checkpoint(const string
& name
) = 0;
873 virtual int syncfs() = 0;
874 virtual bool has_fiemap() = 0;
875 virtual bool has_seek_data_hole() = 0;
876 virtual bool is_rotational() = 0;
877 virtual int do_fiemap(int fd
, off_t start
, size_t len
, struct fiemap
**pfiemap
) = 0;
878 virtual int clone_range(int from
, int to
, uint64_t srcoff
, uint64_t len
, uint64_t dstoff
) = 0;
879 virtual int set_alloc_hint(int fd
, uint64_t hint
) = 0;
880 virtual bool has_splice() const = 0;
  // hooks for (sloppy) crc tracking
  virtual int _crc_update_write(int fd, loff_t off, size_t len, const bufferlist& bl) = 0;
  virtual int _crc_update_truncate(int fd, loff_t off) = 0;
  virtual int _crc_update_zero(int fd, loff_t off, size_t len) = 0;
  virtual int _crc_update_clone_range(int srcfd, int destfd,
                                      loff_t srcoff, size_t len, loff_t dstoff) = 0;
  virtual int _crc_verify_read(int fd, loff_t off, size_t len, const bufferlist& bl,
                               vector<string> *out) = 0;
};
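
// Minimal backend sketch (hypothetical subclass, for illustration only;
// the real tree builds GenericFileStoreBackend and filesystem-specific
// variants on this interface). A backend without checkpoint or sparse-copy
// support simply stubs the capability probes:
//
//   class FlatFileStoreBackend : public FileStoreBackend {
//   public:
//     explicit FlatFileStoreBackend(FileStore *fs) : FileStoreBackend(fs) {}
//     const char *get_name() override { return "flat"; }
//     bool can_checkpoint() override { return false; }
//     bool has_fiemap() override { return false; }
//     bool has_seek_data_hole() override { return false; }
//     // ... remaining pure virtuals stubbed similarly ...
//   };
//
// With both probes false, _copy_range() above falls back to the dense
// _do_copy_range() path instead of the sparse copy.

#endif // CEPH_FILESTORE_H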