// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */


#ifndef CEPH_FILESTORE_H
#define CEPH_FILESTORE_H

#include "include/types.h"

#include <map>
#include <deque>
#include <atomic>
#include <fstream>

using namespace std;

#include <boost/scoped_ptr.hpp>

#include "include/unordered_map.h"

#include "include/assert.h"

#include "os/ObjectStore.h"
#include "JournalingObjectStore.h"

#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "common/perf_counters.h"
#include "common/zipkin_trace.h"

#include "common/Mutex.h"
#include "HashIndex.h"
#include "IndexManager.h"
#include "os/ObjectMap.h"
#include "SequencerPosition.h"
#include "FDCache.h"
#include "WBThrottle.h"

#include "include/uuid.h"


// from include/linux/falloc.h:
#ifndef FALLOC_FL_PUNCH_HOLE
# define FALLOC_FL_PUNCH_HOLE 0x2
#endif

#if defined(__linux__)
# ifndef BTRFS_SUPER_MAGIC
#define BTRFS_SUPER_MAGIC 0x9123683EL
# endif
# ifndef XFS_SUPER_MAGIC
#define XFS_SUPER_MAGIC 0x58465342L
# endif
# ifndef ZFS_SUPER_MAGIC
#define ZFS_SUPER_MAGIC 0x2fc12fc1L
# endif
#endif
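//
// Illustrative sketch (not part of the original header): these magic
// numbers are matched against the f_type that fstatfs(2) reports for the
// base directory when a FileStoreBackend is selected, along the lines of:
//
//   struct statfs st;
//   if (::fstatfs(basedir_fd, &st) == 0 &&
//       st.f_type == BTRFS_SUPER_MAGIC) {
//     // select the btrfs-capable backend
//   }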


class FileStoreBackend;

#define CEPH_FS_FEATURE_INCOMPAT_SHARDS CompatSet::Feature(1, "sharded objects")

enum {
  l_filestore_first = 84000,
  l_filestore_journal_queue_ops,
  l_filestore_journal_queue_bytes,
  l_filestore_journal_ops,
  l_filestore_journal_bytes,
  l_filestore_journal_latency,
  l_filestore_journal_wr,
  l_filestore_journal_wr_bytes,
  l_filestore_journal_full,
  l_filestore_committing,
  l_filestore_commitcycle,
  l_filestore_commitcycle_interval,
  l_filestore_commitcycle_latency,
  l_filestore_op_queue_max_ops,
  l_filestore_op_queue_ops,
  l_filestore_ops,
  l_filestore_op_queue_max_bytes,
  l_filestore_op_queue_bytes,
  l_filestore_bytes,
  l_filestore_apply_latency,
  l_filestore_queue_transaction_latency_avg,
  l_filestore_sync_pause_max_lat,
  l_filestore_last,
};
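//
// Illustrative sketch (not part of the original header): ids in the range
// (l_filestore_first, l_filestore_last) are registered through the usual
// PerfCountersBuilder pattern, e.g.:
//
//   PerfCountersBuilder plb(cct, internal_name,
//                           l_filestore_first, l_filestore_last);
//   plb.add_u64(l_filestore_journal_queue_ops, "journal_queue_ops");
//   // ... one add_* call per id above ...
//   logger = plb.create_perf_counters();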

class FSSuperblock {
public:
  CompatSet compat_features;
  string omap_backend;

  FSSuperblock() { }

  void encode(bufferlist &bl) const;
  void decode(bufferlist::iterator &bl);
  void dump(Formatter *f) const;
  static void generate_test_instances(list<FSSuperblock*>& o);
};
WRITE_CLASS_ENCODER(FSSuperblock)

inline ostream& operator<<(ostream& out, const FSSuperblock& sb)
{
  return out << "sb(" << sb.compat_features << "): "
             << sb.omap_backend;
}
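//
// Illustrative sketch (not part of the original header): WRITE_CLASS_ENCODER
// generates free encode()/decode() overloads, so the superblock round-trips
// through a bufferlist like any other encodable type:
//
//   bufferlist bl;
//   ::encode(sb, bl);               // calls FSSuperblock::encode
//   bufferlist::iterator p = bl.begin();
//   ::decode(sb2, p);               // calls FSSuperblock::decode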

class FileStore : public JournalingObjectStore,
                  public md_config_obs_t
{
  static const uint32_t target_version = 4;
public:
  uint32_t get_target_version() {
    return target_version;
  }

  static int get_block_device_fsid(CephContext* cct, const string& path,
                                   uuid_d *fsid);
  struct FSPerfTracker {
    PerfCounters::avg_tracker<uint64_t> os_commit_latency;
    PerfCounters::avg_tracker<uint64_t> os_apply_latency;

    objectstore_perf_stat_t get_cur_stats() const {
      objectstore_perf_stat_t ret;
      ret.os_commit_latency = os_commit_latency.avg();
      ret.os_apply_latency = os_apply_latency.avg();
      return ret;
    }

    void update_from_perfcounters(PerfCounters &logger);
  } perf_tracker;
  objectstore_perf_stat_t get_cur_stats() override {
    perf_tracker.update_from_perfcounters(*logger);
    return perf_tracker.get_cur_stats();
  }
  const PerfCounters* get_perf_counters() const override {
    return logger;
  }

private:
  string internal_name; ///< internal name, used to name the perfcounter instance
  string basedir, journalpath;
  osflagbits_t generic_flags;
  std::string current_fn;
  std::string current_op_seq_fn;
  std::string omap_dir;
  uuid_d fsid;

  size_t blk_size; ///< fs block size

  int fsid_fd, op_fd, basedir_fd, current_fd;

  FileStoreBackend *backend;

  void create_backend(long f_type);

  deque<uint64_t> snaps;

  // Indexed Collections
  IndexManager index_manager;
  int get_index(const coll_t& c, Index *index);
  int init_index(const coll_t& c);

  bool _need_temp_object_collection(const coll_t& cid, const ghobject_t& oid) {
    // - normal temp case: cid is pg, object is temp (pool < -1)
    // - hammer temp case: cid is pg (or already temp), object pool is -1
    return cid.is_pg() && oid.hobj.pool <= -1;
  }
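  //
  // Illustrative sketch (not part of the original header, and assuming the
  // hobject_t::get_temp_pool() convention where pool p maps to -2 - p):
  //
  //   ghobject_t t = oid;
  //   t.hobj.pool = -2 - t.hobj.pool;  // make a "temp" twin of oid
  //   assert(_need_temp_object_collection(cid, t));  // classified as temp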
  void init_temp_collections();

  // ObjectMap
  boost::scoped_ptr<ObjectMap> object_map;

  // helper fns
  int get_cdir(const coll_t& cid, char *s, int len);

  /// read a uuid from fd
  int read_fsid(int fd, uuid_d *uuid);

  /// lock fsid_fd
  int lock_fsid();

  // sync thread
  Mutex lock;
  bool force_sync;
  Cond sync_cond;

  Mutex sync_entry_timeo_lock;
  SafeTimer timer;

  list<Context*> sync_waiters;
  bool stop;
  void sync_entry();
  struct SyncThread : public Thread {
    FileStore *fs;
    explicit SyncThread(FileStore *f) : fs(f) {}
    void *entry() override {
      fs->sync_entry();
      return 0;
    }
  } sync_thread;

  // -- op workqueue --
  struct Op {
    utime_t start;
    uint64_t op;
    vector<Transaction> tls;
    Context *onreadable, *onreadable_sync;
    uint64_t ops, bytes;
    TrackedOpRef osd_op;
    ZTracer::Trace trace;
  };
  class OpSequencer : public Sequencer_impl {
    Mutex qlock; // to protect q, for benefit of flush (peek/dequeue also protected by lock)
    list<Op*> q;
    list<uint64_t> jq;
    list<pair<uint64_t, Context*> > flush_commit_waiters;
    Cond cond;
  public:
    Sequencer *parent;
    Mutex apply_lock; // for apply mutual exclusion
    int id;

    /// get_max_uncompleted
    bool _get_max_uncompleted(
      uint64_t *seq ///< [out] max uncompleted seq
      ) {
      assert(qlock.is_locked());
      assert(seq);
      *seq = 0;
      if (q.empty() && jq.empty())
        return true;

      if (!q.empty())
        *seq = q.back()->op;
      if (!jq.empty() && jq.back() > *seq)
        *seq = jq.back();

      return false;
    } /// @returns true if both queues are empty

    /// get_min_uncompleted
    bool _get_min_uncompleted(
      uint64_t *seq ///< [out] min uncompleted seq
      ) {
      assert(qlock.is_locked());
      assert(seq);
      *seq = 0;
      if (q.empty() && jq.empty())
        return true;

      if (!q.empty())
        *seq = q.front()->op;
      if (!jq.empty() && jq.front() < *seq)
        *seq = jq.front();

      return false;
    } /// @returns true if both queues are empty

    void _wake_flush_waiters(list<Context*> *to_queue) {
      uint64_t seq;
      if (_get_min_uncompleted(&seq))
        seq = -1;

      for (list<pair<uint64_t, Context*> >::iterator i =
             flush_commit_waiters.begin();
           i != flush_commit_waiters.end() && i->first < seq;
           flush_commit_waiters.erase(i++)) {
        to_queue->push_back(i->second);
      }
    }

    void queue_journal(uint64_t s) {
      Mutex::Locker l(qlock);
      jq.push_back(s);
    }
    void dequeue_journal(list<Context*> *to_queue) {
      Mutex::Locker l(qlock);
      jq.pop_front();
      cond.Signal();
      _wake_flush_waiters(to_queue);
    }
    void queue(Op *o) {
      Mutex::Locker l(qlock);
      q.push_back(o);
      o->trace.keyval("queue depth", q.size());
    }
    Op *peek_queue() {
      Mutex::Locker l(qlock);
      assert(apply_lock.is_locked());
      return q.front();
    }

    Op *dequeue(list<Context*> *to_queue) {
      assert(to_queue);
      assert(apply_lock.is_locked());
      Mutex::Locker l(qlock);
      Op *o = q.front();
      q.pop_front();
      cond.Signal();

      _wake_flush_waiters(to_queue);
      return o;
    }

    void flush() override {
      Mutex::Locker l(qlock);

      while (cct->_conf->filestore_blackhole)
        cond.Wait(qlock); // wait forever


      // get max for journal _or_ op queues
      uint64_t seq = 0;
      if (!q.empty())
        seq = q.back()->op;
      if (!jq.empty() && jq.back() > seq)
        seq = jq.back();

      if (seq) {
        // wait for everything prior to our watermark to drain through either/both queues
        while ((!q.empty() && q.front()->op <= seq) ||
               (!jq.empty() && jq.front() <= seq))
          cond.Wait(qlock);
      }
    }
    bool flush_commit(Context *c) override {
      Mutex::Locker l(qlock);
      uint64_t seq = 0;
      if (_get_max_uncompleted(&seq)) {
        return true;
      } else {
        flush_commit_waiters.push_back(make_pair(seq, c));
        return false;
      }
    }
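    //
    // Illustrative sketch (not part of the original header): a caller either
    // gets true (nothing uncommitted) or leaves c queued until the max
    // uncompleted seq drains via _wake_flush_waiters():
    //
    //   if (osr->flush_commit(c)) {
    //     c->complete(0);  // nothing in flight; complete inline
    //   }                  // otherwise c fires once seq has completed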

    OpSequencer(CephContext* cct, int i)
      : Sequencer_impl(cct),
        qlock("FileStore::OpSequencer::qlock", false, false),
        parent(0),
        apply_lock("FileStore::OpSequencer::apply_lock", false, false),
        id(i) {}
    ~OpSequencer() override {
      assert(q.empty());
    }

    const string& get_name() const {
      return parent->get_name();
    }
  };

  friend ostream& operator<<(ostream& out, const OpSequencer& s);

  FDCache fdcache;
  WBThrottle wbthrottle;

  std::atomic<int64_t> next_osr_id = { 0 };
  bool m_disable_wbthrottle;
  deque<OpSequencer*> op_queue;
  BackoffThrottle throttle_ops, throttle_bytes;
  const int m_ondisk_finisher_num;
  const int m_apply_finisher_num;
  vector<Finisher*> ondisk_finishers;
  vector<Finisher*> apply_finishers;

  ThreadPool op_tp;
  struct OpWQ : public ThreadPool::WorkQueue<OpSequencer> {
    FileStore *store;
    OpWQ(FileStore *fs, time_t timeout, time_t suicide_timeout, ThreadPool *tp)
      : ThreadPool::WorkQueue<OpSequencer>("FileStore::OpWQ", timeout, suicide_timeout, tp), store(fs) {}

    bool _enqueue(OpSequencer *osr) override {
      store->op_queue.push_back(osr);
      return true;
    }
    void _dequeue(OpSequencer *o) override {
      ceph_abort();
    }
    bool _empty() override {
      return store->op_queue.empty();
    }
    OpSequencer *_dequeue() override {
      if (store->op_queue.empty())
        return NULL;
      OpSequencer *osr = store->op_queue.front();
      store->op_queue.pop_front();
      return osr;
    }
    void _process(OpSequencer *osr, ThreadPool::TPHandle &handle) override {
      store->_do_op(osr, handle);
    }
    void _process_finish(OpSequencer *osr) override {
      store->_finish_op(osr);
    }
    void _clear() override {
      assert(store->op_queue.empty());
    }
  } op_wq;

  void _do_op(OpSequencer *o, ThreadPool::TPHandle &handle);
  void _finish_op(OpSequencer *o);
  Op *build_op(vector<Transaction>& tls,
               Context *onreadable, Context *onreadable_sync,
               TrackedOpRef osd_op);
  void queue_op(OpSequencer *osr, Op *o);
  void op_queue_reserve_throttle(Op *o);
  void op_queue_release_throttle(Op *o);
  void _journaled_ahead(OpSequencer *osr, Op *o, Context *ondisk);
  friend struct C_JournaledAhead;

  void new_journal();

  PerfCounters *logger;

  ZTracer::Endpoint trace_endpoint;

public:
  int lfn_find(const ghobject_t& oid, const Index& index,
               IndexedPath *path = NULL);
  int lfn_truncate(const coll_t& cid, const ghobject_t& oid, off_t length);
  int lfn_stat(const coll_t& cid, const ghobject_t& oid, struct stat *buf);
  int lfn_open(
    const coll_t& cid,
    const ghobject_t& oid,
    bool create,
    FDRef *outfd,
    Index *index = 0);

  void lfn_close(FDRef fd);
  int lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& o, const ghobject_t& newoid);
  int lfn_unlink(const coll_t& cid, const ghobject_t& o, const SequencerPosition &spos,
                 bool force_clear_omap=false);

public:
  FileStore(CephContext* cct, const std::string &base, const std::string &jdev,
            osflagbits_t flags = 0,
            const char *internal_name = "filestore", bool update_to=false);
  ~FileStore() override;

  string get_type() override {
    return "filestore";
  }

  int _detect_fs();
  int _sanity_check_fs();

  bool test_mount_in_use() override;
  int read_op_seq(uint64_t *seq);
  int write_op_seq(int, uint64_t seq);
  int mount() override;
  int umount() override;

  int validate_hobject_key(const hobject_t &obj) const override;

  unsigned get_max_attr_name_length() override {
    // xattr limit is 128; leave room for our prefixes (user.ceph._),
    // some margin, and cap at 100
    return 100;
  }
  int mkfs() override;
  int mkjournal() override;
  bool wants_journal() override {
    return true;
  }
  bool allows_journal() override {
    return true;
  }
  bool needs_journal() override {
    return false;
  }

  bool is_rotational() override;

  void dump_perf_counters(Formatter *f) override {
    f->open_object_section("perf_counters");
    logger->dump_formatted(f, false);
    f->close_section();
  }

  int write_version_stamp();
  int version_stamp_is_valid(uint32_t *version);
  int update_version_stamp();
  int upgrade() override;

  bool can_sort_nibblewise() override {
    return true; // i support legacy sort order
  }

  void collect_metadata(map<string,string> *pm) override;

  int statfs(struct store_statfs_t *buf) override;

  int _do_transactions(
    vector<Transaction> &tls, uint64_t op_seq,
    ThreadPool::TPHandle *handle);
  int do_transactions(vector<Transaction> &tls, uint64_t op_seq) override {
    return _do_transactions(tls, op_seq, 0);
  }
  void _do_transaction(
    Transaction& t, uint64_t op_seq, int trans_num,
    ThreadPool::TPHandle *handle);

  int queue_transactions(Sequencer *osr, vector<Transaction>& tls,
                         TrackedOpRef op = TrackedOpRef(),
                         ThreadPool::TPHandle *handle = NULL) override;
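  //
  // Illustrative sketch (not part of the original header): callers submit
  // transactions against a Sequencer, which maps to an OpSequencer here:
  //
  //   ObjectStore::Sequencer osr("example");
  //   ObjectStore::Transaction t;
  //   t.touch(cid, oid);
  //   vector<ObjectStore::Transaction> tls;
  //   tls.push_back(std::move(t));
  //   store->queue_transactions(&osr, tls);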

  /**
   * set replay guard xattr on given file
   *
   * This will ensure that we will not replay this (or any previous) operation
   * against this particular inode/object.
   *
   * @param fd open file descriptor for the file/object
   * @param spos sequencer position of the last operation we should not replay
   */
  void _set_replay_guard(int fd,
                         const SequencerPosition& spos,
                         const ghobject_t *oid=0,
                         bool in_progress=false);
  void _set_replay_guard(const coll_t& cid,
                         const SequencerPosition& spos,
                         bool in_progress);
  void _set_global_replay_guard(const coll_t& cid,
                                const SequencerPosition &spos);

  /// close a replay guard opened with in_progress=true
  void _close_replay_guard(int fd, const SequencerPosition& spos,
                           const ghobject_t *oid=0);
  void _close_replay_guard(const coll_t& cid, const SequencerPosition& spos);

  /**
   * check replay guard xattr on given file
   *
   * Check the current position against any marker on the file that
   * indicates which operations have already been applied. If the
   * current or a newer operation has been marked as applied, we
   * should not replay the current operation again.
   *
   * If we are not replaying the journal, we always return true. It
   * is only on replay that we might return false, indicating that the
   * operation should not be performed (again).
   *
   * @param fd open fd on the file/object in question
   * @param spos SequencerPosition for an operation we could apply/replay
   * @return 1 if we can apply (maybe replay) this operation, -1 if spos has already been applied, 0 if it was in progress
   */
  int _check_replay_guard(int fd, const SequencerPosition& spos);
  int _check_replay_guard(const coll_t& cid, const SequencerPosition& spos);
  int _check_replay_guard(const coll_t& cid, const ghobject_t &oid, const SequencerPosition& pos);
  int _check_global_replay_guard(const coll_t& cid, const SequencerPosition& spos);
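  //
  // Illustrative sketch (not part of the original header) of the usual
  // guard idiom inside an operation implementation:
  //
  //   int r = _check_replay_guard(cid, oid, spos);
  //   if (r < 0)
  //     return 0;                           // already applied; skip on replay
  //   // ... perform the operation ...
  //   _set_replay_guard(**fd, spos, &oid);  // mark spos as applied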

  // ------------------
  // objects
  int pick_object_revision_lt(ghobject_t& oid) {
    return 0;
  }
  using ObjectStore::exists;
  bool exists(const coll_t& cid, const ghobject_t& oid) override;
  using ObjectStore::stat;
  int stat(
    const coll_t& cid,
    const ghobject_t& oid,
    struct stat *st,
    bool allow_eio = false) override;
  using ObjectStore::set_collection_opts;
  int set_collection_opts(
    const coll_t& cid,
    const pool_opts_t& opts) override;
  using ObjectStore::read;
  int read(
    const coll_t& cid,
    const ghobject_t& oid,
    uint64_t offset,
    size_t len,
    bufferlist& bl,
    uint32_t op_flags = 0) override;
  int _do_fiemap(int fd, uint64_t offset, size_t len,
                 map<uint64_t, uint64_t> *m);
  int _do_seek_hole_data(int fd, uint64_t offset, size_t len,
                         map<uint64_t, uint64_t> *m);
  using ObjectStore::fiemap;
  int fiemap(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len, bufferlist& bl) override;
  int fiemap(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len, map<uint64_t, uint64_t>& destmap) override;

  int _touch(const coll_t& cid, const ghobject_t& oid);
  int _write(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len,
             const bufferlist& bl, uint32_t fadvise_flags = 0);
  int _zero(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len);
  int _truncate(const coll_t& cid, const ghobject_t& oid, uint64_t size);
  int _clone(const coll_t& cid, const ghobject_t& oldoid, const ghobject_t& newoid,
             const SequencerPosition& spos);
  int _clone_range(const coll_t& oldcid, const ghobject_t& oldoid, const coll_t& newcid, const ghobject_t& newoid,
                   uint64_t srcoff, uint64_t len, uint64_t dstoff,
                   const SequencerPosition& spos);
  int _do_clone_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff);
  int _do_sparse_copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff);
  int _do_copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff, bool skip_sloppycrc=false);
  int _remove(const coll_t& cid, const ghobject_t& oid, const SequencerPosition &spos);

  int _fgetattr(int fd, const char *name, bufferptr& bp);
  int _fgetattrs(int fd, map<string,bufferptr>& aset);
  int _fsetattrs(int fd, map<string, bufferptr> &aset);

  void _start_sync();

  void do_force_sync();
  void start_sync(Context *onsafe);
  void sync();
  void _flush_op_queue();
  void flush();
  void sync_and_flush();

  int flush_journal() override;
  int dump_journal(ostream& out) override;

  void set_fsid(uuid_d u) override {
    fsid = u;
  }
  uuid_d get_fsid() override { return fsid; }

  uint64_t estimate_objects_overhead(uint64_t num_objects) override;

  // DEBUG read error injection; an object is removed from both sets on delete()
  Mutex read_error_lock;
  set<ghobject_t> data_error_set; // read() will return -EIO
  set<ghobject_t> mdata_error_set; // getattr(),stat() will return -EIO
  void inject_data_error(const ghobject_t &oid) override;
  void inject_mdata_error(const ghobject_t &oid) override;

  void compact() override {
    assert(object_map);
    object_map->compact();
  }

  void debug_obj_on_delete(const ghobject_t &oid);
  bool debug_data_eio(const ghobject_t &oid);
  bool debug_mdata_eio(const ghobject_t &oid);

  int snapshot(const string& name) override;

  // attrs
  using ObjectStore::getattr;
  using ObjectStore::getattrs;
  int getattr(const coll_t& cid, const ghobject_t& oid, const char *name, bufferptr &bp) override;
  int getattrs(const coll_t& cid, const ghobject_t& oid, map<string,bufferptr>& aset) override;

  int _setattrs(const coll_t& cid, const ghobject_t& oid, map<string,bufferptr>& aset,
                const SequencerPosition &spos);
  int _rmattr(const coll_t& cid, const ghobject_t& oid, const char *name,
              const SequencerPosition &spos);
  int _rmattrs(const coll_t& cid, const ghobject_t& oid,
               const SequencerPosition &spos);

  int _collection_remove_recursive(const coll_t &cid,
                                   const SequencerPosition &spos);

  int _collection_set_bits(const coll_t& cid, int bits);

  // collections
  using ObjectStore::collection_list;
  int collection_bits(const coll_t& c) override;
  int collection_list(const coll_t& c,
                      const ghobject_t& start, const ghobject_t& end, int max,
                      vector<ghobject_t> *ls, ghobject_t *next) override;
  int list_collections(vector<coll_t>& ls) override;
  int list_collections(vector<coll_t>& ls, bool include_temp);
  int collection_stat(const coll_t& c, struct stat *st);
  bool collection_exists(const coll_t& c) override;
  int collection_empty(const coll_t& c, bool *empty) override;

  // omap (see ObjectStore.h for documentation)
  using ObjectStore::omap_get;
  int omap_get(const coll_t& c, const ghobject_t &oid, bufferlist *header,
               map<string, bufferlist> *out) override;
  using ObjectStore::omap_get_header;
  int omap_get_header(
    const coll_t& c,
    const ghobject_t &oid,
    bufferlist *out,
    bool allow_eio = false) override;
  using ObjectStore::omap_get_keys;
  int omap_get_keys(const coll_t& c, const ghobject_t &oid, set<string> *keys) override;
  using ObjectStore::omap_get_values;
  int omap_get_values(const coll_t& c, const ghobject_t &oid, const set<string> &keys,
                      map<string, bufferlist> *out) override;
  using ObjectStore::omap_check_keys;
  int omap_check_keys(const coll_t& c, const ghobject_t &oid, const set<string> &keys,
                      set<string> *out) override;
  using ObjectStore::get_omap_iterator;
  ObjectMap::ObjectMapIterator get_omap_iterator(const coll_t& c, const ghobject_t &oid) override;
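  //
  // Illustrative sketch (not part of the original header): walking an
  // object's omap with the returned iterator:
  //
  //   ObjectMap::ObjectMapIterator it = store->get_omap_iterator(c, oid);
  //   for (it->seek_to_first(); it->valid(); it->next()) {
  //     // consume it->key() and it->value()
  //   }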

  int _create_collection(const coll_t& c, int bits,
                         const SequencerPosition &spos);
  int _destroy_collection(const coll_t& c);
  /**
   * Give an expected number of objects hint to the collection.
   *
   * @param c - collection id.
   * @param pg_num - pg number of the pool this collection belongs to
   * @param expected_num_objs - expected number of objects in this collection
   * @param spos - sequence position
   *
   * @return 0 on success, an error code otherwise
   */
  int _collection_hint_expected_num_objs(const coll_t& c, uint32_t pg_num,
                                         uint64_t expected_num_objs,
                                         const SequencerPosition &spos);
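  //
  // Illustrative sketch (not part of the original header): this hint
  // normally arrives encoded in a Transaction collection_hint op, e.g.:
  //
  //   bufferlist hint;
  //   ::encode(pg_num, hint);
  //   ::encode(expected_num_objs, hint);
  //   t.collection_hint(c, ObjectStore::Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS,
  //                     hint);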
  int _collection_add(const coll_t& c, const coll_t& ocid, const ghobject_t& oid,
                      const SequencerPosition& spos);
  int _collection_move_rename(const coll_t& oldcid, const ghobject_t& oldoid,
                              coll_t c, const ghobject_t& o,
                              const SequencerPosition& spos,
                              bool ignore_enoent = false);

  int _set_alloc_hint(const coll_t& cid, const ghobject_t& oid,
                      uint64_t expected_object_size,
                      uint64_t expected_write_size);

  void dump_start(const std::string& file);
  void dump_stop();
  void dump_transactions(vector<Transaction>& ls, uint64_t seq, OpSequencer *osr);

  virtual int apply_layout_settings(const coll_t &cid);

private:
  void _inject_failure();

  // omap
  int _omap_clear(const coll_t& cid, const ghobject_t &oid,
                  const SequencerPosition &spos);
  int _omap_setkeys(const coll_t& cid, const ghobject_t &oid,
                    const map<string, bufferlist> &aset,
                    const SequencerPosition &spos);
  int _omap_rmkeys(const coll_t& cid, const ghobject_t &oid, const set<string> &keys,
                   const SequencerPosition &spos);
  int _omap_rmkeyrange(const coll_t& cid, const ghobject_t &oid,
                       const string& first, const string& last,
                       const SequencerPosition &spos);
  int _omap_setheader(const coll_t& cid, const ghobject_t &oid, const bufferlist &bl,
                      const SequencerPosition &spos);
  int _split_collection(const coll_t& cid, uint32_t bits, uint32_t rem, coll_t dest,
                        const SequencerPosition &spos);
  int _split_collection_create(const coll_t& cid, uint32_t bits, uint32_t rem,
                               coll_t dest,
                               const SequencerPosition &spos);

  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const struct md_config_t *conf,
                          const std::set <std::string> &changed) override;
  int set_throttle_params();
  float m_filestore_commit_timeout;
  bool m_filestore_journal_parallel;
  bool m_filestore_journal_trailing;
  bool m_filestore_journal_writeahead;
  int m_filestore_fiemap_threshold;
  double m_filestore_max_sync_interval;
  double m_filestore_min_sync_interval;
  bool m_filestore_fail_eio;
  bool m_filestore_fadvise;
  int do_update;
  bool m_journal_dio, m_journal_aio, m_journal_force_aio;
  std::string m_osd_rollback_to_cluster_snap;
  bool m_osd_use_stale_snap;
  bool m_filestore_do_dump;
  std::ofstream m_filestore_dump;
  JSONFormatter m_filestore_dump_fmt;
  std::atomic<int64_t> m_filestore_kill_at = { 0 };
  bool m_filestore_sloppy_crc;
  int m_filestore_sloppy_crc_block_size;
  uint64_t m_filestore_max_alloc_hint_size;
  long m_fs_type;

  // Determine xattr handling based on fs type
  void set_xattr_limits_via_conf();
  uint32_t m_filestore_max_inline_xattr_size;
  uint32_t m_filestore_max_inline_xattrs;
  uint32_t m_filestore_max_xattr_value_size;

  FSSuperblock superblock;

  /**
   * write_superblock()
   *
   * Write superblock to persistent storage
   *
   * return value: 0 on success, otherwise negative errno
   */
  int write_superblock();

  /**
   * read_superblock()
   *
   * Fill in FileStore::superblock by reading persistent storage
   *
   * return value: 0 on success, otherwise negative errno
   */
  int read_superblock();

  friend class FileStoreBackend;
  friend class TestFileStore;
};

ostream& operator<<(ostream& out, const FileStore::OpSequencer& s);

struct fiemap;

class FileStoreBackend {
private:
  FileStore *filestore;
protected:
  int get_basedir_fd() {
    return filestore->basedir_fd;
  }
  int get_current_fd() {
    return filestore->current_fd;
  }
  int get_op_fd() {
    return filestore->op_fd;
  }
  size_t get_blksize() {
    return filestore->blk_size;
  }
  const string& get_basedir_path() {
    return filestore->basedir;
  }
  const string& get_current_path() {
    return filestore->current_fn;
  }
  int _copy_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff) {
    if (has_fiemap() || has_seek_data_hole()) {
      return filestore->_do_sparse_copy_range(from, to, srcoff, len, dstoff);
    } else {
      return filestore->_do_copy_range(from, to, srcoff, len, dstoff);
    }
  }
  int get_crc_block_size() {
    return filestore->m_filestore_sloppy_crc_block_size;
  }

public:
  explicit FileStoreBackend(FileStore *fs) : filestore(fs) {}
  virtual ~FileStoreBackend() {}

  CephContext* cct() const {
    return filestore->cct;
  }

  static FileStoreBackend *create(long f_type, FileStore *fs);

  virtual const char *get_name() = 0;
  virtual int detect_features() = 0;
  virtual int create_current() = 0;
  virtual bool can_checkpoint() = 0;
  virtual int list_checkpoints(list<string>& ls) = 0;
  virtual int create_checkpoint(const string& name, uint64_t *cid) = 0;
  virtual int sync_checkpoint(uint64_t id) = 0;
  virtual int rollback_to(const string& name) = 0;
  virtual int destroy_checkpoint(const string& name) = 0;
  virtual int syncfs() = 0;
  virtual bool has_fiemap() = 0;
  virtual bool has_seek_data_hole() = 0;
  virtual bool is_rotational() = 0;
  virtual int do_fiemap(int fd, off_t start, size_t len, struct fiemap **pfiemap) = 0;
  virtual int clone_range(int from, int to, uint64_t srcoff, uint64_t len, uint64_t dstoff) = 0;
  virtual int set_alloc_hint(int fd, uint64_t hint) = 0;
  virtual bool has_splice() const = 0;

  // hooks for (sloppy) crc tracking
  virtual int _crc_update_write(int fd, loff_t off, size_t len, const bufferlist& bl) = 0;
  virtual int _crc_update_truncate(int fd, loff_t off) = 0;
  virtual int _crc_update_zero(int fd, loff_t off, size_t len) = 0;
  virtual int _crc_update_clone_range(int srcfd, int destfd,
                                      loff_t srcoff, size_t len, loff_t dstoff) = 0;
  virtual int _crc_verify_read(int fd, loff_t off, size_t len, const bufferlist& bl,
                               ostream *out) = 0;
};

#endif