1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 * Ceph - scalable distributed file system
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
14 #ifndef CEPH_OBJECTSTORE_H
15 #define CEPH_OBJECTSTORE_H
17 #include "include/Context.h"
18 #include "include/buffer.h"
19 #include "include/types.h"
20 #include "osd/osd_types.h"
21 #include "common/TrackedOp.h"
22 #include "common/WorkQueue.h"
23 #include "ObjectMap.h"
30 #if defined(DARWIN) || defined(__FreeBSD__) || defined(__sun)
31 #include <sys/statvfs.h>
33 #include <sys/vfs.h> /* or <sys/statfs.h> */
36 #define OPS_PER_PTR 32
49 * low-level interface to the local OSD file system
55 static inline void encode(const map
<string
,bufferptr
> *attrset
, bufferlist
&bl
) {
56 ::encode(*attrset
, bl
);
// this isn't the best place for these, but...
// Re-encode an encoded map<string,string> directly into *out as a bufferlist
// (presumably without materializing the intermediate map -- see the .cc for
// the implementation; only the declarations are visible here).
void decode_str_str_map_to_bl(bufferlist::iterator& p, bufferlist *out);
// Re-encode an encoded set<string> directly into *out as a bufferlist.
void decode_str_set_to_bl(bufferlist::iterator& p, bufferlist *out);

// Bit-flag type accepted by ObjectStore::create() (osflagbits_t flags param).
typedef uint32_t osflagbits_t;
// As the names suggest: skip journal replay on mount / skip mounting the
// omap store.  TODO(review): confirm exact semantics against the filestore
// implementations that consume these flags.
const int SKIP_JOURNAL_REPLAY = 1 << 0;
const int SKIP_MOUNT_OMAP = 1 << 1;
75 * create - create an ObjectStore instance.
77 * This is invoked once at initialization time.
79 * @param type type of store. This is a string from the configuration file.
80 * @param data path (or other descriptor) for data
81 * @param journal path (or other descriptor) for journal (optional)
82 * @param flags which filestores should check if applicable
84 static ObjectStore
*create(CephContext
*cct
,
87 const string
& journal
,
88 osflagbits_t flags
= 0);
91 * probe a block device to learn the uuid of the owning OSD
94 * @param path path to device
95 * @param fsid [out] osd uuid
97 static int probe_block_device_fsid(
/**
 * Fetch Object Store statistics.
 *
 * Currently only latency of write and apply times are measured.
 *
 * This appears to be called with nothing locked.
 */
virtual objectstore_perf_stat_t get_cur_stats() = 0;

/**
 * Fetch Object Store performance counters.
 *
 * This appears to be called with nothing locked.
 */
virtual const PerfCounters* get_perf_counters() const = 0;
120 * a sequencer orders transactions
122 * Any transactions queued under a given sequencer will be applied in
123 * sequence. Transactions queued under different sequencers may run
126 * Clients of ObjectStore create and maintain their own Sequencer objects.
127 * When a list of transactions is queued the caller specifies a Sequencer to be used.
132 * ABC for Sequencer implementation, private to the ObjectStore derived class.
133 * created in ...::queue_transaction(s)
135 struct Sequencer_impl
: public RefCountedObject
{
138 // block until any previous transactions are visible. specifically,
139 // collection_list and collection_empty need to reflect prior operations.
140 virtual void flush() = 0;
142 // called when we are done with the impl. the impl may have a different
143 // (longer) lifecycle than the Sequencer.
144 virtual void discard() {}
149 * There are two cases:
150 * 1) sequencer is currently idle: the method returns true. c is
152 * 2) sequencer is not idle: the method returns false and c is
153 * called asyncronously with a value of 0 once all transactions
154 * queued on this sequencer prior to the call have been applied
157 virtual bool flush_commit(
158 Context
*c
///< [in] context to call upon flush/commit
159 ) = 0; ///< @return true if idle, false otherwise
161 Sequencer_impl(CephContext
* cct
) : RefCountedObject(NULL
, 0), cct(cct
) {}
162 ~Sequencer_impl() override
{}
164 typedef boost::intrusive_ptr
<Sequencer_impl
> Sequencer_implRef
;
167 * External (opaque) sequencer implementation
174 explicit Sequencer(string n
)
175 : name(n
), shard_hint(spg_t()), p(NULL
) {
179 p
->discard(); // tell impl we are done with it
182 /// return a unique string identifier for this sequencer
183 const string
& get_name() const {
186 /// wait for any queued transactions on this sequencer to apply
192 /// @see Sequencer_impl::flush_commit()
193 bool flush_commit(Context
*c
) {
197 return p
->flush_commit(c
);
202 struct CollectionImpl
: public RefCountedObject
{
203 virtual const coll_t
&get_cid() = 0;
204 CollectionImpl() : RefCountedObject(NULL
, 0) {}
206 typedef boost::intrusive_ptr
<CollectionImpl
> CollectionHandle
;
208 struct CompatCollectionHandle
: public CollectionImpl
{
210 explicit CompatCollectionHandle(coll_t c
) : cid(c
) {}
211 const coll_t
&get_cid() override
{
216 /*********************************
218 * Object Contents and semantics
220 * All ObjectStore objects are identified as a named object
221 * (ghobject_t and hobject_t) in a named collection (coll_t).
222 * ObjectStore operations support the creation, mutation, deletion
223 * and enumeration of objects within a collection. Enumeration is
224 * in sorted key order (where keys are sorted by hash). Object names
225 * are globally unique.
227 * Each object has four distinct parts: byte data, xattrs, omap_header
230 * The data portion of an object is conceptually equivalent to a
231 * file in a file system. Random and Partial access for both read
232 * and write operations is required. The ability to have a sparse
233 * implementation of the data portion of an object is beneficial for
234 * some workloads, but not required. There is a system-wide limit on
235 * the maximum size of an object, which is typically around 100 MB.
237 * Xattrs are equivalent to the extended attributes of file
238 * systems. Xattrs are a set of key/value pairs. Sub-value access
239 * is not required. It is possible to enumerate the set of xattrs in
240 * key order. At the implementation level, xattrs are used
241 * exclusively internal to Ceph and the implementer can expect the
242 * total size of all of the xattrs on an object to be relatively
243 * small, i.e., less than 64KB. Much of Ceph assumes that accessing
244 * xattrs on temporally adjacent object accesses (recent past or
245 * near future) is inexpensive.
247 * omap_header is a single blob of data. It can be read or written
250 * Omap entries are conceptually the same as xattrs
251 * but in a different address space. In other words, you can have
252 * the same key as an xattr and an omap entry and they have distinct
253 * values. Enumeration of xattrs doesn't include omap entries and
254 * vice versa. The size and access characteristics of omap entries
255 * are very different from xattrs. In particular, the value portion
256 * of an omap entry can be quite large (MBs). More importantly, the
257 * interface must support efficient range queries on omap entries even
258 * when there are a large numbers of entries.
260 *********************************/
262 /*******************************
266 * A collection is simply a grouping of objects. Collections have
267 * names (coll_t) and can be enumerated in order. Like an
268 * individual object, a collection also has a set of xattrs.
274 /*********************************
277 * A Transaction represents a sequence of primitive mutation
280 * Three events in the life of a Transaction result in
281 * callbacks. Any Transaction can contain any number of callback
282 * objects (Context) for any combination of the three classes of
285 * on_applied_sync, on_applied, and on_commit.
287 * The "on_applied" and "on_applied_sync" callbacks are invoked when
288 * the modifications requested by the Transaction are visible to
289 * subsequent ObjectStore operations, i.e., the results are
290 * readable. The only conceptual difference between on_applied and
291 * on_applied_sync is the specific thread and locking environment in
292 * which the callbacks operate. "on_applied_sync" is called
293 * directly by an ObjectStore execution thread. It is expected to
294 * execute quickly and must not acquire any locks of the calling
295 * environment. Conversely, "on_applied" is called from the separate
296 * Finisher thread, meaning that it can contend for calling
297 * environment locks. NB, on_applied and on_applied_sync are
298 * sometimes called on_readable and on_readable_sync.
300 * The "on_commit" callback is also called from the Finisher thread
301 * and indicates that all of the mutations have been durably
302 * committed to stable storage (i.e., are now software/hardware
305 * At the implementation level, each mutation primitive (and its
306 * associated data) can be serialized to a single buffer. That
307 * serialization, however, does not copy any data, but (using the
308 * bufferlist library) will reference the original buffers. This
309 * implies that the buffer that contains the data being submitted
310 * must remain stable until the on_commit callback completes. In
311 * practice, bufferlist handles all of this for you and this
312 * subtlety is only relevant if you are referencing an existing
313 * buffer via buffer::raw_static.
315 * Some implementations of ObjectStore choose to implement their own
316 * form of journaling that uses the serialized form of a
317 * Transaction. This requires that the encode/decode logic properly
318 * version itself and handle version upgrades that might change the
319 * format of the encoded Transaction. This has already happened a
320 * couple of times and the Transaction object contains some helper
321 * variables that aid in this legacy decoding:
323 * sobject_encoding detects an older/simpler version of oid
324 * present in pre-bobtail versions of ceph. use_pool_override
325 * also detects a situation where the pool of an oid can be
326 * override for legacy operations/buffers. For non-legacy
327 * implementation of ObjectStore, neither of these fields is
331 * TRANSACTION ISOLATION
333 * Except as noted below, isolation is the responsibility of the
334 * caller. In other words, if any storage element (storage element
335 * == any of the four portions of an object as described above) is
336 * altered by a transaction (including deletion), the caller
337 * promises not to attempt to read that element while the
338 * transaction is pending (here pending means from the time of
339 * issuance until the "on_applied_sync" callback has been
340 * received). Violations of isolation need not be detected by
341 * ObjectStore and there is no corresponding error mechanism for
342 * reporting an isolation violation (crashing would be the
343 * appropriate way to report an isolation violation if detected).
345 * Enumeration operations may violate transaction isolation as
346 * described above when a storage element is being created or
347 * deleted as part of a transaction. In this case, ObjectStore is
348 * allowed to consider the enumeration operation to either precede
349 * or follow the violating transaction element. In other words, the
350 * presence/absence of the mutated element in the enumeration is
351 * entirely at the discretion of ObjectStore. The arbitrary ordering
352 * applies independently to each transaction element. For example,
353 * if a transaction contains two mutating elements "create A" and
354 * "delete B". And an enumeration operation is performed while this
355 * transaction is pending. It is permissable for ObjectStore to
356 * report any of the four possible combinations of the existence of
364 OP_TOUCH
= 9, // cid, oid
365 OP_WRITE
= 10, // cid, oid, offset, len, bl
366 OP_ZERO
= 11, // cid, oid, offset, len
367 OP_TRUNCATE
= 12, // cid, oid, len
368 OP_REMOVE
= 13, // cid, oid
369 OP_SETATTR
= 14, // cid, oid, attrname, bl
370 OP_SETATTRS
= 15, // cid, oid, attrset
371 OP_RMATTR
= 16, // cid, oid, attrname
372 OP_CLONE
= 17, // cid, oid, newoid
373 OP_CLONERANGE
= 18, // cid, oid, newoid, offset, len
374 OP_CLONERANGE2
= 30, // cid, oid, newoid, srcoff, len, dstoff
376 OP_TRIMCACHE
= 19, // cid, oid, offset, len **DEPRECATED**
378 OP_MKCOLL
= 20, // cid
379 OP_RMCOLL
= 21, // cid
380 OP_COLL_ADD
= 22, // cid, oldcid, oid
381 OP_COLL_REMOVE
= 23, // cid, oid
382 OP_COLL_SETATTR
= 24, // cid, attrname, bl
383 OP_COLL_RMATTR
= 25, // cid, attrname
384 OP_COLL_SETATTRS
= 26, // cid, attrset
385 OP_COLL_MOVE
= 8, // newcid, oldcid, oid
387 OP_STARTSYNC
= 27, // start a sync
389 OP_RMATTRS
= 28, // cid, oid
390 OP_COLL_RENAME
= 29, // cid, newcid
392 OP_OMAP_CLEAR
= 31, // cid
393 OP_OMAP_SETKEYS
= 32, // cid, attrset
394 OP_OMAP_RMKEYS
= 33, // cid, keyset
395 OP_OMAP_SETHEADER
= 34, // cid, header
396 OP_SPLIT_COLLECTION
= 35, // cid, bits, destination
397 OP_SPLIT_COLLECTION2
= 36, /* cid, bits, destination
398 doesn't create the destination */
399 OP_OMAP_RMKEYRANGE
= 37, // cid, oid, firstkey, lastkey
400 OP_COLL_MOVE_RENAME
= 38, // oldcid, oldoid, newcid, newoid
402 OP_SETALLOCHINT
= 39, // cid, oid, object_size, write_size
403 OP_COLL_HINT
= 40, // cid, type, bl
405 OP_TRY_RENAME
= 41, // oldcid, oldoid, newoid
407 OP_COLL_SET_BITS
= 42, // cid, bits
410 // Transaction hint type
412 COLL_HINT_EXPECTED_NUM_OBJECTS
= 1,
422 __le32 dest_oid
; //OP_CLONE, OP_CLONERANGE
423 __le64 dest_off
; //OP_CLONERANGE
426 __le32 hint_type
; //OP_COLL_HINT
429 __le32 alloc_hint_flags
; //OP_SETALLOCHINT
432 __le64 expected_object_size
; //OP_SETALLOCHINT
433 __le64 expected_write_size
; //OP_SETALLOCHINT
434 __le32 split_bits
; //OP_SPLIT_COLLECTION2,OP_COLL_SET_BITS,
436 __le32 split_rem
; //OP_SPLIT_COLLECTION2
437 } __attribute__ ((packed
)) ;
439 struct TransactionData
{
441 __le32 largest_data_len
;
442 __le32 largest_data_off
;
443 __le32 largest_data_off_in_data_bl
;
444 __le32 fadvise_flags
;
446 TransactionData() noexcept
:
450 largest_data_off_in_data_bl(0),
453 // override default move operations to reset default values
454 TransactionData(TransactionData
&& other
) noexcept
:
456 largest_data_len(other
.largest_data_len
),
457 largest_data_off(other
.largest_data_off
),
458 largest_data_off_in_data_bl(other
.largest_data_off_in_data_bl
),
459 fadvise_flags(other
.fadvise_flags
) {
461 other
.largest_data_len
= 0;
462 other
.largest_data_off
= 0;
463 other
.largest_data_off_in_data_bl
= 0;
464 other
.fadvise_flags
= 0;
466 TransactionData
& operator=(TransactionData
&& other
) noexcept
{
468 largest_data_len
= other
.largest_data_len
;
469 largest_data_off
= other
.largest_data_off
;
470 largest_data_off_in_data_bl
= other
.largest_data_off_in_data_bl
;
471 fadvise_flags
= other
.fadvise_flags
;
473 other
.largest_data_len
= 0;
474 other
.largest_data_off
= 0;
475 other
.largest_data_off_in_data_bl
= 0;
476 other
.fadvise_flags
= 0;
// Copying is allowed and is a plain member-wise copy; the struct is
// packed plain data (see the __attribute__((packed)) on the type).
TransactionData(const TransactionData& other) = default;
TransactionData& operator=(const TransactionData& other) = default;
483 void encode(bufferlist
& bl
) const {
484 bl
.append((char*)this, sizeof(TransactionData
));
486 void decode(bufferlist::iterator
&bl
) {
487 bl
.copy(sizeof(TransactionData
), (char*)this);
489 } __attribute__ ((packed
)) ;
492 TransactionData data
;
494 void *osr
{nullptr}; // NULL on replay
496 map
<coll_t
, __le32
> coll_index
;
497 map
<ghobject_t
, __le32
> object_index
;
500 __le32 object_id
{0};
507 list
<Context
*> on_applied
;
508 list
<Context
*> on_commit
;
509 list
<Context
*> on_applied_sync
;
512 Transaction() = default;
514 explicit Transaction(bufferlist::iterator
&dp
) {
517 explicit Transaction(bufferlist
&nbl
) {
518 bufferlist::iterator dp
= nbl
.begin();
522 // override default move operations to reset default values
523 Transaction(Transaction
&& other
) noexcept
:
524 data(std::move(other
.data
)),
526 coll_index(std::move(other
.coll_index
)),
527 object_index(std::move(other
.object_index
)),
528 coll_id(other
.coll_id
),
529 object_id(other
.object_id
),
530 data_bl(std::move(other
.data_bl
)),
531 op_bl(std::move(other
.op_bl
)),
532 op_ptr(std::move(other
.op_ptr
)),
533 on_applied(std::move(other
.on_applied
)),
534 on_commit(std::move(other
.on_commit
)),
535 on_applied_sync(std::move(other
.on_applied_sync
)) {
541 Transaction
& operator=(Transaction
&& other
) noexcept
{
542 data
= std::move(other
.data
);
544 coll_index
= std::move(other
.coll_index
);
545 object_index
= std::move(other
.object_index
);
546 coll_id
= other
.coll_id
;
547 object_id
= other
.object_id
;
548 data_bl
= std::move(other
.data_bl
);
549 op_bl
= std::move(other
.op_bl
);
550 op_ptr
= std::move(other
.op_ptr
);
551 on_applied
= std::move(other
.on_applied
);
552 on_commit
= std::move(other
.on_commit
);
553 on_applied_sync
= std::move(other
.on_applied_sync
);
560 Transaction(const Transaction
& other
) = default;
561 Transaction
& operator=(const Transaction
& other
) = default;
563 /* Operations on callback contexts */
564 void register_on_applied(Context
*c
) {
566 on_applied
.push_back(c
);
568 void register_on_commit(Context
*c
) {
570 on_commit
.push_back(c
);
572 void register_on_applied_sync(Context
*c
) {
574 on_applied_sync
.push_back(c
);
576 void register_on_complete(Context
*c
) {
578 RunOnDeleteRef
_complete (std::make_shared
<RunOnDelete
>(c
));
579 register_on_applied(new ContainerContext
<RunOnDeleteRef
>(_complete
));
580 register_on_commit(new ContainerContext
<RunOnDeleteRef
>(_complete
));
583 static void collect_contexts(
584 vector
<Transaction
>& t
,
585 Context
**out_on_applied
,
586 Context
**out_on_commit
,
587 Context
**out_on_applied_sync
) {
588 assert(out_on_applied
);
589 assert(out_on_commit
);
590 assert(out_on_applied_sync
);
591 list
<Context
*> on_applied
, on_commit
, on_applied_sync
;
592 for (vector
<Transaction
>::iterator i
= t
.begin();
595 on_applied
.splice(on_applied
.end(), (*i
).on_applied
);
596 on_commit
.splice(on_commit
.end(), (*i
).on_commit
);
597 on_applied_sync
.splice(on_applied_sync
.end(), (*i
).on_applied_sync
);
599 *out_on_applied
= C_Contexts::list_to_context(on_applied
);
600 *out_on_commit
= C_Contexts::list_to_context(on_commit
);
601 *out_on_applied_sync
= C_Contexts::list_to_context(on_applied_sync
);
604 Context
*get_on_applied() {
605 return C_Contexts::list_to_context(on_applied
);
607 Context
*get_on_commit() {
608 return C_Contexts::list_to_context(on_commit
);
610 Context
*get_on_applied_sync() {
611 return C_Contexts::list_to_context(on_applied_sync
);
614 void set_fadvise_flags(uint32_t flags
) {
615 data
.fadvise_flags
= flags
;
617 void set_fadvise_flag(uint32_t flag
) {
618 data
.fadvise_flags
= data
.fadvise_flags
| flag
;
620 uint32_t get_fadvise_flags() { return data
.fadvise_flags
; }
622 void swap(Transaction
& other
) noexcept
{
623 std::swap(data
, other
.data
);
624 std::swap(on_applied
, other
.on_applied
);
625 std::swap(on_commit
, other
.on_commit
);
626 std::swap(on_applied_sync
, other
.on_applied_sync
);
628 std::swap(coll_index
, other
.coll_index
);
629 std::swap(object_index
, other
.object_index
);
630 std::swap(coll_id
, other
.coll_id
);
631 std::swap(object_id
, other
.object_id
);
632 op_bl
.swap(other
.op_bl
);
633 data_bl
.swap(other
.data_bl
);
636 void _update_op(Op
* op
,
638 vector
<__le32
> &om
) {
653 case OP_OMAP_SETKEYS
:
655 case OP_OMAP_RMKEYRANGE
:
656 case OP_OMAP_SETHEADER
:
660 case OP_SETALLOCHINT
:
661 assert(op
->cid
< cm
.size());
662 assert(op
->oid
< om
.size());
663 op
->cid
= cm
[op
->cid
];
664 op
->oid
= om
[op
->oid
];
669 assert(op
->cid
< cm
.size());
670 assert(op
->oid
< om
.size());
671 assert(op
->dest_oid
< om
.size());
672 op
->cid
= cm
[op
->cid
];
673 op
->oid
= om
[op
->oid
];
674 op
->dest_oid
= om
[op
->dest_oid
];
679 case OP_COLL_SETATTR
:
681 case OP_COLL_SETATTRS
:
683 case OP_COLL_SET_BITS
:
684 assert(op
->cid
< cm
.size());
685 op
->cid
= cm
[op
->cid
];
689 assert(op
->cid
< cm
.size());
690 assert(op
->oid
< om
.size());
691 assert(op
->dest_cid
< om
.size());
692 op
->cid
= cm
[op
->cid
];
693 op
->dest_cid
= cm
[op
->dest_cid
];
694 op
->oid
= om
[op
->oid
];
697 case OP_COLL_MOVE_RENAME
:
698 assert(op
->cid
< cm
.size());
699 assert(op
->oid
< om
.size());
700 assert(op
->dest_cid
< cm
.size());
701 assert(op
->dest_oid
< om
.size());
702 op
->cid
= cm
[op
->cid
];
703 op
->oid
= om
[op
->oid
];
704 op
->dest_cid
= cm
[op
->dest_cid
];
705 op
->dest_oid
= om
[op
->dest_oid
];
709 assert(op
->cid
< cm
.size());
710 assert(op
->oid
< om
.size());
711 assert(op
->dest_oid
< om
.size());
712 op
->cid
= cm
[op
->cid
];
713 op
->oid
= om
[op
->oid
];
714 op
->dest_oid
= om
[op
->dest_oid
];
717 case OP_SPLIT_COLLECTION2
:
718 assert(op
->cid
< cm
.size());
719 assert(op
->dest_cid
< cm
.size());
720 op
->cid
= cm
[op
->cid
];
721 op
->dest_cid
= cm
[op
->dest_cid
];
725 assert(0 == "Unkown OP");
731 vector
<__le32
> &om
) {
733 list
<bufferptr
> list
= bl
.buffers();
734 std::list
<bufferptr
>::iterator p
;
736 for(p
= list
.begin(); p
!= list
.end(); ++p
) {
737 assert(p
->length() % sizeof(Op
) == 0);
739 char* raw_p
= p
->c_str();
740 char* raw_end
= raw_p
+ p
->length();
741 while (raw_p
< raw_end
) {
742 _update_op(reinterpret_cast<Op
*>(raw_p
), cm
, om
);
747 /// Append the operations of the parameter to this Transaction. Those operations are removed from the parameter Transaction
748 void append(Transaction
& other
) {
750 data
.ops
+= other
.data
.ops
;
751 if (other
.data
.largest_data_len
> data
.largest_data_len
) {
752 data
.largest_data_len
= other
.data
.largest_data_len
;
753 data
.largest_data_off
= other
.data
.largest_data_off
;
754 data
.largest_data_off_in_data_bl
= data_bl
.length() + other
.data
.largest_data_off_in_data_bl
;
756 data
.fadvise_flags
|= other
.data
.fadvise_flags
;
757 on_applied
.splice(on_applied
.end(), other
.on_applied
);
758 on_commit
.splice(on_commit
.end(), other
.on_commit
);
759 on_applied_sync
.splice(on_applied_sync
.end(), other
.on_applied_sync
);
761 //append coll_index & object_index
762 vector
<__le32
> cm(other
.coll_index
.size());
763 map
<coll_t
, __le32
>::iterator coll_index_p
;
764 for (coll_index_p
= other
.coll_index
.begin();
765 coll_index_p
!= other
.coll_index
.end();
767 cm
[coll_index_p
->second
] = _get_coll_id(coll_index_p
->first
);
770 vector
<__le32
> om(other
.object_index
.size());
771 map
<ghobject_t
, __le32
>::iterator object_index_p
;
772 for (object_index_p
= other
.object_index
.begin();
773 object_index_p
!= other
.object_index
.end();
775 om
[object_index_p
->second
] = _get_object_id(object_index_p
->first
);
778 //the other.op_bl SHOULD NOT be changes during append operation,
779 //we use additional bufferlist to avoid this problem
780 bufferptr
other_op_bl_ptr(other
.op_bl
.length());
781 other
.op_bl
.copy(0, other
.op_bl
.length(), other_op_bl_ptr
.c_str());
782 bufferlist other_op_bl
;
783 other_op_bl
.append(other_op_bl_ptr
);
785 //update other_op_bl with cm & om
786 //When the other is appended to current transaction, all coll_index and
787 //object_index in other.op_buffer should be updated by new index of the
788 //combined transaction
789 _update_op_bl(other_op_bl
, cm
, om
);
792 op_bl
.append(other_op_bl
);
794 data_bl
.append(other
.data_bl
);
797 /** Inquires about the Transaction as a whole. */
799 /// How big is the encoded Transaction buffer?
800 uint64_t get_encoded_bytes() {
801 //layout: data_bl + op_bl + coll_index + object_index + data
803 // coll_index size, object_index size and sizeof(transaction_data)
804 // all here, so they may be computed at compile-time
805 size_t final_size
= sizeof(__u32
) * 2 + sizeof(data
);
807 // coll_index second and object_index second
808 final_size
+= (coll_index
.size() + object_index
.size()) * sizeof(__le32
);
811 for (auto p
= coll_index
.begin(); p
!= coll_index
.end(); ++p
) {
812 final_size
+= p
->first
.encoded_size();
815 // object_index first
816 for (auto p
= object_index
.begin(); p
!= object_index
.end(); ++p
) {
817 final_size
+= p
->first
.encoded_size();
820 return data_bl
.length() +
825 /// Retain old version for regression testing purposes
826 uint64_t get_encoded_bytes_test() {
827 //layout: data_bl + op_bl + coll_index + object_index + data
829 ::encode(coll_index
, bl
);
830 ::encode(object_index
, bl
);
832 return data_bl
.length() +
838 uint64_t get_num_bytes() {
839 return get_encoded_bytes();
841 /// Size of largest data buffer to the "write" operation encountered so far
842 uint32_t get_data_length() {
843 return data
.largest_data_len
;
845 /// offset within the encoded buffer to the start of the largest data buffer that's encoded
846 uint32_t get_data_offset() {
847 if (data
.largest_data_off_in_data_bl
) {
848 return data
.largest_data_off_in_data_bl
+
849 sizeof(__u8
) + // encode struct_v
850 sizeof(__u8
) + // encode compat_v
851 sizeof(__u32
) + // encode len
852 sizeof(__u32
); // data_bl len
856 /// offset of buffer as aligned to destination within object.
857 int get_data_alignment() {
858 if (!data
.largest_data_len
)
860 return (0 - get_data_offset()) & ~CEPH_PAGE_MASK
;
862 /// Is the Transaction empty (no operations)
866 /// Number of operations in the transation
871 void set_osr(void *s
) {
882 * Helper object to parse Transactions.
884 * ObjectStore instances use this object to step down the encoded
885 * buffer decoding operation codes and parameters as we go.
894 bufferlist::iterator data_bl_p
;
897 vector
<coll_t
> colls
;
898 vector
<ghobject_t
> objects
;
901 explicit iterator(Transaction
*t
)
903 data_bl_p(t
->data_bl
.begin()),
904 colls(t
->coll_index
.size()),
905 objects(t
->object_index
.size()) {
908 op_buffer_p
= t
->op_bl
.get_contiguous(0, t
->data
.ops
* sizeof(Op
));
910 map
<coll_t
, __le32
>::iterator coll_index_p
;
911 for (coll_index_p
= t
->coll_index
.begin();
912 coll_index_p
!= t
->coll_index
.end();
914 colls
[coll_index_p
->second
] = coll_index_p
->first
;
917 map
<ghobject_t
, __le32
>::iterator object_index_p
;
918 for (object_index_p
= t
->object_index
.begin();
919 object_index_p
!= t
->object_index
.end();
921 objects
[object_index_p
->second
] = object_index_p
->first
;
925 friend class Transaction
;
935 Op
* op
= reinterpret_cast<Op
*>(op_buffer_p
);
936 op_buffer_p
+= sizeof(Op
);
941 string
decode_string() {
943 ::decode(s
, data_bl_p
);
946 void decode_bp(bufferptr
& bp
) {
947 ::decode(bp
, data_bl_p
);
949 void decode_bl(bufferlist
& bl
) {
950 ::decode(bl
, data_bl_p
);
952 void decode_attrset(map
<string
,bufferptr
>& aset
) {
953 ::decode(aset
, data_bl_p
);
955 void decode_attrset(map
<string
,bufferlist
>& aset
) {
956 ::decode(aset
, data_bl_p
);
958 void decode_attrset_bl(bufferlist
*pbl
) {
959 decode_str_str_map_to_bl(data_bl_p
, pbl
);
961 void decode_keyset(set
<string
> &keys
){
962 ::decode(keys
, data_bl_p
);
964 void decode_keyset_bl(bufferlist
*pbl
){
965 decode_str_set_to_bl(data_bl_p
, pbl
);
968 const ghobject_t
&get_oid(__le32 oid_id
) {
969 assert(oid_id
< objects
.size());
970 return objects
[oid_id
];
972 const coll_t
&get_cid(__le32 cid_id
) {
973 assert(cid_id
< colls
.size());
974 return colls
[cid_id
];
976 uint32_t get_fadvise_flags() const {
977 return t
->get_fadvise_flags();
982 return iterator(this);
986 void _build_actions_from_tbl();
989 * Helper functions to encode the various mutation elements of a
990 * transaction. These are 1:1 with the operation codes (see
991 * enumeration above). These routines ensure that the
992 * encoder/creator of a transaction gets the right data in the
993 * right place. Sadly, there's no corresponding version nor any
994 * form of seat belts for the decoder.
997 if (op_ptr
.length() == 0 || op_ptr
.offset() >= op_ptr
.length()) {
998 op_ptr
= bufferptr(sizeof(Op
) * OPS_PER_PTR
);
1000 bufferptr
ptr(op_ptr
, 0, sizeof(Op
));
1003 op_ptr
.set_offset(op_ptr
.offset() + sizeof(Op
));
1005 char* p
= ptr
.c_str();
1006 memset(p
, 0, sizeof(Op
));
1007 return reinterpret_cast<Op
*>(p
);
1009 __le32
_get_coll_id(const coll_t
& coll
) {
1010 map
<coll_t
, __le32
>::iterator c
= coll_index
.find(coll
);
1011 if (c
!= coll_index
.end())
1014 __le32 index_id
= coll_id
++;
1015 coll_index
[coll
] = index_id
;
1018 __le32
_get_object_id(const ghobject_t
& oid
) {
1019 map
<ghobject_t
, __le32
>::iterator o
= object_index
.find(oid
);
1020 if (o
!= object_index
.end())
1023 __le32 index_id
= object_id
++;
1024 object_index
[oid
] = index_id
;
1029 /// Commence a global file system sync operation.
1031 Op
* _op
= _get_next_op();
1032 _op
->op
= OP_STARTSYNC
;
1037 Op
* _op
= _get_next_op();
1044 * Ensure the existance of an object in a collection. Create an
1045 * empty object if necessary
1047 void touch(const coll_t
& cid
, const ghobject_t
& oid
) {
1048 Op
* _op
= _get_next_op();
1050 _op
->cid
= _get_coll_id(cid
);
1051 _op
->oid
= _get_object_id(oid
);
1055 * Write data to an offset within an object. If the object is too
1056 * small, it is expanded as needed. It is possible to specify an
1057 * offset beyond the current end of an object and it will be
1058 * expanded as needed. Simple implementations of ObjectStore will
1059 * just zero the data between the old end of the object and the
1060 * newly provided data. More sophisticated implementations of
1061 * ObjectStore will omit the untouched data and store it as a
1062 * "hole" in the file.
1064 void write(const coll_t
& cid
, const ghobject_t
& oid
, uint64_t off
, uint64_t len
,
1065 const bufferlist
& write_data
, uint32_t flags
= 0) {
1066 uint32_t orig_len
= data_bl
.length();
1067 Op
* _op
= _get_next_op();
1069 _op
->cid
= _get_coll_id(cid
);
1070 _op
->oid
= _get_object_id(oid
);
1073 ::encode(write_data
, data_bl
);
1075 assert(len
== write_data
.length());
1076 data
.fadvise_flags
= data
.fadvise_flags
| flags
;
1077 if (write_data
.length() > data
.largest_data_len
) {
1078 data
.largest_data_len
= write_data
.length();
1079 data
.largest_data_off
= off
;
1080 data
.largest_data_off_in_data_bl
= orig_len
+ sizeof(__u32
); // we are about to
1085 * zero out the indicated byte range within an object. Some
1086 * ObjectStore instances may optimize this to release the
1087 * underlying storage space.
1089 void zero(const coll_t
& cid
, const ghobject_t
& oid
, uint64_t off
, uint64_t len
) {
1090 Op
* _op
= _get_next_op();
1092 _op
->cid
= _get_coll_id(cid
);
1093 _op
->oid
= _get_object_id(oid
);
1098 /// Discard all data in the object beyond the specified size.
1099 void truncate(const coll_t
& cid
, const ghobject_t
& oid
, uint64_t off
) {
1100 Op
* _op
= _get_next_op();
1101 _op
->op
= OP_TRUNCATE
;
1102 _op
->cid
= _get_coll_id(cid
);
1103 _op
->oid
= _get_object_id(oid
);
1107 /// Remove an object. All four parts of the object are removed.
1108 void remove(const coll_t
& cid
, const ghobject_t
& oid
) {
1109 Op
* _op
= _get_next_op();
1110 _op
->op
= OP_REMOVE
;
1111 _op
->cid
= _get_coll_id(cid
);
1112 _op
->oid
= _get_object_id(oid
);
1115 /// Set an xattr of an object
1116 void setattr(const coll_t
& cid
, const ghobject_t
& oid
, const char* name
, bufferlist
& val
) {
1118 setattr(cid
, oid
, n
, val
);
1120 /// Set an xattr of an object
1121 void setattr(const coll_t
& cid
, const ghobject_t
& oid
, const string
& s
, bufferlist
& val
) {
1122 Op
* _op
= _get_next_op();
1123 _op
->op
= OP_SETATTR
;
1124 _op
->cid
= _get_coll_id(cid
);
1125 _op
->oid
= _get_object_id(oid
);
1126 ::encode(s
, data_bl
);
1127 ::encode(val
, data_bl
);
1130 /// Set multiple xattrs of an object
1131 void setattrs(const coll_t
& cid
, const ghobject_t
& oid
, const map
<string
,bufferptr
>& attrset
) {
1132 Op
* _op
= _get_next_op();
1133 _op
->op
= OP_SETATTRS
;
1134 _op
->cid
= _get_coll_id(cid
);
1135 _op
->oid
= _get_object_id(oid
);
1136 ::encode(attrset
, data_bl
);
1139 /// Set multiple xattrs of an object
1140 void setattrs(const coll_t
& cid
, const ghobject_t
& oid
, const map
<string
,bufferlist
>& attrset
) {
1141 Op
* _op
= _get_next_op();
1142 _op
->op
= OP_SETATTRS
;
1143 _op
->cid
= _get_coll_id(cid
);
1144 _op
->oid
= _get_object_id(oid
);
1145 ::encode(attrset
, data_bl
);
1148 /// remove an xattr from an object
1149 void rmattr(const coll_t
& cid
, const ghobject_t
& oid
, const char *name
) {
1151 rmattr(cid
, oid
, n
);
1153 /// remove an xattr from an object
1154 void rmattr(const coll_t
& cid
, const ghobject_t
& oid
, const string
& s
) {
1155 Op
* _op
= _get_next_op();
1156 _op
->op
= OP_RMATTR
;
1157 _op
->cid
= _get_coll_id(cid
);
1158 _op
->oid
= _get_object_id(oid
);
1159 ::encode(s
, data_bl
);
1162 /// remove all xattrs from an object
1163 void rmattrs(const coll_t
& cid
, const ghobject_t
& oid
) {
1164 Op
* _op
= _get_next_op();
1165 _op
->op
= OP_RMATTRS
;
1166 _op
->cid
= _get_coll_id(cid
);
1167 _op
->oid
= _get_object_id(oid
);
1171 * Clone an object into another object.
1173 * Low-cost (e.g., O(1)) cloning (if supported) is best, but
1174 * fallback to an O(n) copy is allowed. All four parts of the
1175 * object are cloned (data, xattrs, omap header, omap
1178 * The destination named object may already exist, in
1179 * which case its previous contents are discarded.
1181 void clone(const coll_t
& cid
, const ghobject_t
& oid
,
1182 const ghobject_t
& noid
) {
1183 Op
* _op
= _get_next_op();
1185 _op
->cid
= _get_coll_id(cid
);
1186 _op
->oid
= _get_object_id(oid
);
1187 _op
->dest_oid
= _get_object_id(noid
);
1191 * Clone a byte range from one object to another.
1193 * The data portion of the destination object receives a copy of a
1194 * portion of the data from the source object. None of the other
1195 * three parts of an object is copied from the source.
1197 * The destination object size may be extended to the dstoff + len.
1199 * The source range *must* overlap with the source object data. If it does
1200 * not the result is undefined.
1202 void clone_range(const coll_t
& cid
, const ghobject_t
& oid
,
1203 const ghobject_t
& noid
,
1204 uint64_t srcoff
, uint64_t srclen
, uint64_t dstoff
) {
1205 Op
* _op
= _get_next_op();
1206 _op
->op
= OP_CLONERANGE2
;
1207 _op
->cid
= _get_coll_id(cid
);
1208 _op
->oid
= _get_object_id(oid
);
1209 _op
->dest_oid
= _get_object_id(noid
);
1212 _op
->dest_off
= dstoff
;
1216 /// Create the collection
1217 void create_collection(const coll_t
& cid
, int bits
) {
1218 Op
* _op
= _get_next_op();
1219 _op
->op
= OP_MKCOLL
;
1220 _op
->cid
= _get_coll_id(cid
);
1221 _op
->split_bits
= bits
;
1226 * Give the collection a hint.
1228 * @param cid - collection id.
1229 * @param type - hint type.
1230 * @param hint - the hint payload, which contains the customized
1231 * data along with the hint type.
1233 void collection_hint(const coll_t
& cid
, uint32_t type
, const bufferlist
& hint
) {
1234 Op
* _op
= _get_next_op();
1235 _op
->op
= OP_COLL_HINT
;
1236 _op
->cid
= _get_coll_id(cid
);
1237 _op
->hint_type
= type
;
1238 ::encode(hint
, data_bl
);
1242 /// remove the collection, the collection must be empty
1243 void remove_collection(const coll_t
& cid
) {
1244 Op
* _op
= _get_next_op();
1245 _op
->op
= OP_RMCOLL
;
1246 _op
->cid
= _get_coll_id(cid
);
1249 void collection_move(const coll_t
& cid
, coll_t oldcid
, const ghobject_t
& oid
)
1250 __attribute__ ((deprecated
)) {
1251 // NOTE: we encode this as a fixed combo of ADD + REMOVE. they
1252 // always appear together, so this is effectively a single MOVE.
1253 Op
* _op
= _get_next_op();
1254 _op
->op
= OP_COLL_ADD
;
1255 _op
->cid
= _get_coll_id(oldcid
);
1256 _op
->oid
= _get_object_id(oid
);
1257 _op
->dest_cid
= _get_coll_id(cid
);
1260 _op
= _get_next_op();
1261 _op
->op
= OP_COLL_REMOVE
;
1262 _op
->cid
= _get_coll_id(oldcid
);
1263 _op
->oid
= _get_object_id(oid
);
1266 void collection_move_rename(const coll_t
& oldcid
, const ghobject_t
& oldoid
,
1267 coll_t cid
, const ghobject_t
& oid
) {
1268 Op
* _op
= _get_next_op();
1269 _op
->op
= OP_COLL_MOVE_RENAME
;
1270 _op
->cid
= _get_coll_id(oldcid
);
1271 _op
->oid
= _get_object_id(oldoid
);
1272 _op
->dest_cid
= _get_coll_id(cid
);
1273 _op
->dest_oid
= _get_object_id(oid
);
1276 void try_rename(coll_t cid
, const ghobject_t
& oldoid
,
1277 const ghobject_t
& oid
) {
1278 Op
* _op
= _get_next_op();
1279 _op
->op
= OP_TRY_RENAME
;
1280 _op
->cid
= _get_coll_id(cid
);
1281 _op
->oid
= _get_object_id(oldoid
);
1282 _op
->dest_oid
= _get_object_id(oid
);
1286 /// Remove omap from oid
1288 coll_t cid
, ///< [in] Collection containing oid
1289 const ghobject_t
&oid
///< [in] Object from which to remove omap
1291 Op
* _op
= _get_next_op();
1292 _op
->op
= OP_OMAP_CLEAR
;
1293 _op
->cid
= _get_coll_id(cid
);
1294 _op
->oid
= _get_object_id(oid
);
1297 /// Set keys on oid omap. Replaces duplicate keys.
1299 const coll_t
& cid
, ///< [in] Collection containing oid
1300 const ghobject_t
&oid
, ///< [in] Object to update
1301 const map
<string
, bufferlist
> &attrset
///< [in] Replacement keys and values
1303 Op
* _op
= _get_next_op();
1304 _op
->op
= OP_OMAP_SETKEYS
;
1305 _op
->cid
= _get_coll_id(cid
);
1306 _op
->oid
= _get_object_id(oid
);
1307 ::encode(attrset
, data_bl
);
1311 /// Set keys on an oid omap (bufferlist variant).
1313 coll_t cid
, ///< [in] Collection containing oid
1314 const ghobject_t
&oid
, ///< [in] Object to update
1315 const bufferlist
&attrset_bl
///< [in] Replacement keys and values
1317 Op
* _op
= _get_next_op();
1318 _op
->op
= OP_OMAP_SETKEYS
;
1319 _op
->cid
= _get_coll_id(cid
);
1320 _op
->oid
= _get_object_id(oid
);
1321 data_bl
.append(attrset_bl
);
1325 /// Remove keys from oid omap
1327 coll_t cid
, ///< [in] Collection containing oid
1328 const ghobject_t
&oid
, ///< [in] Object from which to remove the omap
1329 const set
<string
> &keys
///< [in] Keys to clear
1331 Op
* _op
= _get_next_op();
1332 _op
->op
= OP_OMAP_RMKEYS
;
1333 _op
->cid
= _get_coll_id(cid
);
1334 _op
->oid
= _get_object_id(oid
);
1335 ::encode(keys
, data_bl
);
1339 /// Remove keys from oid omap
1341 coll_t cid
, ///< [in] Collection containing oid
1342 const ghobject_t
&oid
, ///< [in] Object from which to remove the omap
1343 const bufferlist
&keys_bl
///< [in] Keys to clear
1345 Op
* _op
= _get_next_op();
1346 _op
->op
= OP_OMAP_RMKEYS
;
1347 _op
->cid
= _get_coll_id(cid
);
1348 _op
->oid
= _get_object_id(oid
);
1349 data_bl
.append(keys_bl
);
1353 /// Remove key range from oid omap
1354 void omap_rmkeyrange(
1355 coll_t cid
, ///< [in] Collection containing oid
1356 const ghobject_t
&oid
, ///< [in] Object from which to remove the omap keys
1357 const string
& first
, ///< [in] first key in range
1358 const string
& last
///< [in] first key past range, range is [first,last)
1360 Op
* _op
= _get_next_op();
1361 _op
->op
= OP_OMAP_RMKEYRANGE
;
1362 _op
->cid
= _get_coll_id(cid
);
1363 _op
->oid
= _get_object_id(oid
);
1364 ::encode(first
, data_bl
);
1365 ::encode(last
, data_bl
);
1370 void omap_setheader(
1371 coll_t cid
, ///< [in] Collection containing oid
1372 const ghobject_t
&oid
, ///< [in] Object
1373 const bufferlist
&bl
///< [in] Header value
1375 Op
* _op
= _get_next_op();
1376 _op
->op
= OP_OMAP_SETHEADER
;
1377 _op
->cid
= _get_coll_id(cid
);
1378 _op
->oid
= _get_object_id(oid
);
1379 ::encode(bl
, data_bl
);
1383 /// Split collection based on given prefixes, objects matching the specified bits/rem are
1384 /// moved to the new collection
1385 void split_collection(
1389 coll_t destination
) {
1390 Op
* _op
= _get_next_op();
1391 _op
->op
= OP_SPLIT_COLLECTION2
;
1392 _op
->cid
= _get_coll_id(cid
);
1393 _op
->dest_cid
= _get_coll_id(destination
);
1394 _op
->split_bits
= bits
;
1395 _op
->split_rem
= rem
;
1399 void collection_set_bits(
1402 Op
* _op
= _get_next_op();
1403 _op
->op
= OP_COLL_SET_BITS
;
1404 _op
->cid
= _get_coll_id(cid
);
1405 _op
->split_bits
= bits
;
1409 /// Set allocation hint for an object
1410 /// make 0 values(expected_object_size, expected_write_size) noops for all implementations
1411 void set_alloc_hint(
1413 const ghobject_t
&oid
,
1414 uint64_t expected_object_size
,
1415 uint64_t expected_write_size
,
1418 Op
* _op
= _get_next_op();
1419 _op
->op
= OP_SETALLOCHINT
;
1420 _op
->cid
= _get_coll_id(cid
);
1421 _op
->oid
= _get_object_id(oid
);
1422 _op
->expected_object_size
= expected_object_size
;
1423 _op
->expected_write_size
= expected_write_size
;
1424 _op
->alloc_hint_flags
= flags
;
1428 void encode(bufferlist
& bl
) const {
1429 //layout: data_bl + op_bl + coll_index + object_index + data
1430 ENCODE_START(9, 9, bl
);
1431 ::encode(data_bl
, bl
);
1432 ::encode(op_bl
, bl
);
1433 ::encode(coll_index
, bl
);
1434 ::encode(object_index
, bl
);
1439 void decode(bufferlist::iterator
&bl
) {
1440 DECODE_START(9, bl
);
1443 ::decode(data_bl
, bl
);
1444 ::decode(op_bl
, bl
);
1445 ::decode(coll_index
, bl
);
1446 ::decode(object_index
, bl
);
1448 coll_id
= coll_index
.size();
1449 object_id
= object_index
.size();
1454 void dump(ceph::Formatter
*f
);
1455 static void generate_test_instances(list
<Transaction
*>& o
);
1458 // synchronous wrappers
1459 unsigned apply_transaction(Sequencer
*osr
, Transaction
&& t
, Context
*ondisk
=0) {
1460 vector
<Transaction
> tls
;
1461 tls
.push_back(std::move(t
));
1462 return apply_transactions(osr
, tls
, ondisk
);
1464 unsigned apply_transactions(Sequencer
*osr
, vector
<Transaction
>& tls
, Context
*ondisk
=0);
1466 int queue_transaction(Sequencer
*osr
, Transaction
&& t
, Context
*onreadable
, Context
*ondisk
=0,
1467 Context
*onreadable_sync
=0,
1468 TrackedOpRef op
= TrackedOpRef(),
1469 ThreadPool::TPHandle
*handle
= NULL
) {
1470 vector
<Transaction
> tls
;
1471 tls
.push_back(std::move(t
));
1472 return queue_transactions(osr
, tls
, onreadable
, ondisk
, onreadable_sync
,
1476 int queue_transactions(Sequencer
*osr
, vector
<Transaction
>& tls
,
1477 Context
*onreadable
, Context
*ondisk
=0,
1478 Context
*onreadable_sync
=0,
1479 TrackedOpRef op
= TrackedOpRef(),
1480 ThreadPool::TPHandle
*handle
= NULL
) {
1481 assert(!tls
.empty());
1482 tls
.back().register_on_applied(onreadable
);
1483 tls
.back().register_on_commit(ondisk
);
1484 tls
.back().register_on_applied_sync(onreadable_sync
);
1485 return queue_transactions(osr
, tls
, op
, handle
);
1488 virtual int queue_transactions(
1489 Sequencer
*osr
, vector
<Transaction
>& tls
,
1490 TrackedOpRef op
= TrackedOpRef(),
1491 ThreadPool::TPHandle
*handle
= NULL
) = 0;
1494 int queue_transactions(
1496 vector
<Transaction
>& tls
,
1497 Context
*onreadable
,
1499 Context
*onreadable_sync
,
1500 Context
*oncomplete
,
1503 int queue_transaction(
1506 Context
*onreadable
,
1508 Context
*onreadable_sync
,
1509 Context
*oncomplete
,
1512 vector
<Transaction
> tls
;
1513 tls
.push_back(std::move(t
));
1514 return queue_transactions(
1515 osr
, tls
, onreadable
, oncommit
, onreadable_sync
, oncomplete
, op
);
1519 ObjectStore(CephContext
* cct
,
1520 const std::string
& path_
) : path(path_
), cct(cct
) {}
1521 virtual ~ObjectStore() {}
1524 explicit ObjectStore(const ObjectStore
& o
) = delete;
1525 const ObjectStore
& operator=(const ObjectStore
& o
) = delete;
1528 virtual int upgrade() {
1532 virtual void get_db_statistics(Formatter
*f
) { }
1533 virtual void generate_db_histogram(Formatter
*f
) { }
1534 virtual void flush_cache() { }
1535 virtual void dump_perf_counters(Formatter
*f
) {}
1537 virtual string
get_type() = 0;
1540 virtual bool test_mount_in_use() = 0;
1541 virtual int mount() = 0;
1542 virtual int umount() = 0;
1543 virtual int fsck(bool deep
) {
1547 virtual void set_cache_shards(unsigned num
) { }
1550 * Returns 0 if the hobject is valid, -error otherwise
1553 * -ENAMETOOLONG: locator/namespace/name too large
1555 virtual int validate_hobject_key(const hobject_t
&obj
) const = 0;
1557 virtual unsigned get_max_attr_name_length() = 0;
1558 virtual int mkfs() = 0; // wipe
1559 virtual int mkjournal() = 0; // journal only
1560 virtual bool needs_journal() = 0; //< requires a journal
1561 virtual bool wants_journal() = 0; //< prefers a journal
1562 virtual bool allows_journal() = 0; //< allows a journal
1567 * Check whether store is backed by a rotational (HDD) or non-rotational
1570 * This must be usable *before* the store is mounted.
1572 * @return true for HDD, false for SSD
1574 virtual bool is_rotational() {
1578 virtual string
get_default_device_class() {
1579 return is_rotational() ? "hdd" : "ssd";
1582 virtual bool can_sort_nibblewise() {
1583 return false; // assume a backend cannot, unless it says otherwise
1586 virtual int statfs(struct store_statfs_t
*buf
) = 0;
1588 virtual void collect_metadata(map
<string
,string
> *pm
) { }
1591 * write_meta - write a simple configuration key out-of-band
1593 * Write a simple key/value pair for basic store configuration
1594 * (e.g., a uuid or magic number) to an unopened/unmounted store.
1595 * The default implementation writes this to a plaintext file in the
1598 * A newline is appended.
1600 * @param key key name (e.g., "fsid")
1601 * @param value value (e.g., a uuid rendered as a string)
1602 * @returns 0 for success, or an error code
1604 virtual int write_meta(const std::string
& key
,
1605 const std::string
& value
);
1608 * read_meta - read a simple configuration key out-of-band
1610 * Read a simple key value to an unopened/mounted store.
1612 * Trailing whitespace is stripped off.
1614 * @param key key name
1615 * @param value pointer to value string
1616 * @returns 0 for success, or an error code
1618 virtual int read_meta(const std::string
& key
,
1619 std::string
*value
);
1622 * get ideal max value for collection_list()
1624 * default to some arbitrary values; the implementation will override.
1626 virtual int get_ideal_list_max() { return 64; }
1630 * get a collection handle
1632 * Provide a trivial handle as a default to avoid converting legacy
1635 virtual CollectionHandle
open_collection(const coll_t
&cid
) {
1636 return new CompatCollectionHandle(cid
);
1641 * Synchronous read operations
1645 * exists -- Test for existance of object
1647 * @param cid collection for object
1648 * @param oid oid of object
1649 * @returns true if object exists, false otherwise
1651 virtual bool exists(const coll_t
& cid
, const ghobject_t
& oid
) = 0; // useful?
1652 virtual bool exists(CollectionHandle
& c
, const ghobject_t
& oid
) {
1653 return exists(c
->get_cid(), oid
);
1656 * set_collection_opts -- set pool options for a collectioninformation for an object
1658 * @param cid collection
1659 * @param opts new collection options
1660 * @returns 0 on success, negative error code on failure.
1662 virtual int set_collection_opts(
1664 const pool_opts_t
& opts
) = 0;
1667 * stat -- get information for an object
1669 * @param cid collection for object
1670 * @param oid oid of object
1671 * @param st output information for the object
1672 * @param allow_eio if false, assert on -EIO operation failure
1673 * @returns 0 on success, negative error code on failure.
1677 const ghobject_t
& oid
,
1679 bool allow_eio
= false) = 0; // struct stat?
1681 CollectionHandle
&c
,
1682 const ghobject_t
& oid
,
1684 bool allow_eio
= false) {
1685 return stat(c
->get_cid(), oid
, st
, allow_eio
);
1689 * read -- read a byte range of data from an object
1691 * Note: if reading from an offset past the end of the object, we
1692 * return 0 (not, say, -EINVAL).
1694 * @param cid collection for object
1695 * @param oid oid of object
1696 * @param offset location offset of first byte to be read
1697 * @param len number of bytes to be read
1698 * @param bl output bufferlist
1699 * @param op_flags is CEPH_OSD_OP_FLAG_*
1700 * @param allow_eio if false, assert on -EIO operation failure
1701 * @returns number of bytes read on success, or negative error code on failure.
1705 const ghobject_t
& oid
,
1709 uint32_t op_flags
= 0) = 0;
1711 CollectionHandle
&c
,
1712 const ghobject_t
& oid
,
1716 uint32_t op_flags
= 0) {
1717 return read(c
->get_cid(), oid
, offset
, len
, bl
, op_flags
);
1721 * fiemap -- get extent map of data of an object
1723 * Returns an encoded map of the extents of an object's data portion
1724 * (map<offset,size>).
1726 * A non-enlightened implementation is free to return the extent (offset, len)
1727 * as the sole extent.
1729 * @param cid collection for object
1730 * @param oid oid of object
1731 * @param offset location offset of first byte to be read
1732 * @param len number of bytes to be read
1733 * @param bl output bufferlist for extent map information.
1734 * @returns 0 on success, negative error code on failure.
1736 virtual int fiemap(const coll_t
& cid
, const ghobject_t
& oid
,
1737 uint64_t offset
, size_t len
, bufferlist
& bl
) = 0;
1738 virtual int fiemap(const coll_t
& cid
, const ghobject_t
& oid
,
1739 uint64_t offset
, size_t len
,
1740 map
<uint64_t, uint64_t>& destmap
) = 0;
1741 virtual int fiemap(CollectionHandle
& c
, const ghobject_t
& oid
,
1742 uint64_t offset
, size_t len
, bufferlist
& bl
) {
1743 return fiemap(c
->get_cid(), oid
, offset
, len
, bl
);
1745 virtual int fiemap(CollectionHandle
& c
, const ghobject_t
& oid
,
1746 uint64_t offset
, size_t len
, map
<uint64_t, uint64_t>& destmap
) {
1747 return fiemap(c
->get_cid(), oid
, offset
, len
, destmap
);
1751 * getattr -- get an xattr of an object
1753 * @param cid collection for object
1754 * @param oid oid of object
1755 * @param name name of attr to read
1756 * @param value place to put output result.
1757 * @returns 0 on success, negative error code on failure.
1759 virtual int getattr(const coll_t
& cid
, const ghobject_t
& oid
,
1760 const char *name
, bufferptr
& value
) = 0;
1761 virtual int getattr(CollectionHandle
&c
, const ghobject_t
& oid
,
1762 const char *name
, bufferptr
& value
) {
1763 return getattr(c
->get_cid(), oid
, name
, value
);
1767 * getattr -- get an xattr of an object
1769 * @param cid collection for object
1770 * @param oid oid of object
1771 * @param name name of attr to read
1772 * @param value place to put output result.
1773 * @returns 0 on success, negative error code on failure.
1775 int getattr(const coll_t
& cid
, const ghobject_t
& oid
, const char *name
, bufferlist
& value
) {
1777 int r
= getattr(cid
, oid
, name
, bp
);
1779 value
.push_back(bp
);
1783 coll_t cid
, const ghobject_t
& oid
,
1784 const string
& name
, bufferlist
& value
) {
1786 int r
= getattr(cid
, oid
, name
.c_str(), bp
);
1787 value
.push_back(bp
);
1791 CollectionHandle
&c
, const ghobject_t
& oid
,
1792 const string
& name
, bufferlist
& value
) {
1794 int r
= getattr(c
, oid
, name
.c_str(), bp
);
1795 value
.push_back(bp
);
1800 * getattrs -- get all of the xattrs of an object
1802 * @param cid collection for object
1803 * @param oid oid of object
1804 * @param aset place to put output result.
1805 * @returns 0 on success, negative error code on failure.
1807 virtual int getattrs(const coll_t
& cid
, const ghobject_t
& oid
,
1808 map
<string
,bufferptr
>& aset
) = 0;
1809 virtual int getattrs(CollectionHandle
&c
, const ghobject_t
& oid
,
1810 map
<string
,bufferptr
>& aset
) {
1811 return getattrs(c
->get_cid(), oid
, aset
);
1815 * getattrs -- get all of the xattrs of an object
1817 * @param cid collection for object
1818 * @param oid oid of object
1819 * @param aset place to put output result.
1820 * @returns 0 on success, negative error code on failure.
1822 int getattrs(const coll_t
& cid
, const ghobject_t
& oid
, map
<string
,bufferlist
>& aset
) {
1823 map
<string
,bufferptr
> bmap
;
1824 int r
= getattrs(cid
, oid
, bmap
);
1825 for (map
<string
,bufferptr
>::iterator i
= bmap
.begin();
1828 aset
[i
->first
].append(i
->second
);
1832 int getattrs(CollectionHandle
&c
, const ghobject_t
& oid
,
1833 map
<string
,bufferlist
>& aset
) {
1834 map
<string
,bufferptr
> bmap
;
1835 int r
= getattrs(c
, oid
, bmap
);
1836 for (map
<string
,bufferptr
>::iterator i
= bmap
.begin();
1839 aset
[i
->first
].append(i
->second
);
1848 * list_collections -- get all of the collections known to this ObjectStore
1850 * @param ls list of the collections in sorted order.
1851 * @returns 0 on success, negative error code on failure.
1853 virtual int list_collections(vector
<coll_t
>& ls
) = 0;
1856 * does a collection exist?
1858 * @param c collection
1859 * @returns true if it exists, false otherwise
1861 virtual bool collection_exists(const coll_t
& c
) = 0;
1864 * is a collection empty?
1866 * @param c collection
1867 * @param empty true if the specified collection is empty, false otherwise
1868 * @returns 0 on success, negative error code on failure.
1870 virtual int collection_empty(const coll_t
& c
, bool *empty
) = 0;
1873 * return the number of significant bits of the coll_t::pgid.
1875 * This should return what the last create_collection or split_collection
1876 * set. A legacy backend may return -EAGAIN if the value is unavailable
1877 * (because we upgraded from an older version, e.g., FileStore).
1879 virtual int collection_bits(const coll_t
& c
) = 0;
1883 * list contents of a collection that fall in the range [start, end) and no more than a specified many result
1885 * @param c collection
1886 * @param start list object that sort >= this value
1887 * @param end list objects that sort < this value
1888 * @param max return no more than this many results
1889 * @param seq return no objects with snap < seq
1890 * @param ls [out] result
1891 * @param next [out] next item sorts >= this value
1892 * @return zero on success, or negative error
1894 virtual int collection_list(const coll_t
& c
,
1895 const ghobject_t
& start
, const ghobject_t
& end
,
1897 vector
<ghobject_t
> *ls
, ghobject_t
*next
) = 0;
1898 virtual int collection_list(CollectionHandle
&c
,
1899 const ghobject_t
& start
, const ghobject_t
& end
,
1901 vector
<ghobject_t
> *ls
, ghobject_t
*next
) {
1902 return collection_list(c
->get_cid(), start
, end
, max
, ls
, next
);
1907 /// Get omap contents
1908 virtual int omap_get(
1909 const coll_t
& c
, ///< [in] Collection containing oid
1910 const ghobject_t
&oid
, ///< [in] Object containing omap
1911 bufferlist
*header
, ///< [out] omap header
1912 map
<string
, bufferlist
> *out
/// < [out] Key to value map
1914 virtual int omap_get(
1915 CollectionHandle
&c
, ///< [in] Collection containing oid
1916 const ghobject_t
&oid
, ///< [in] Object containing omap
1917 bufferlist
*header
, ///< [out] omap header
1918 map
<string
, bufferlist
> *out
/// < [out] Key to value map
1920 return omap_get(c
->get_cid(), oid
, header
, out
);
1924 virtual int omap_get_header(
1925 const coll_t
& c
, ///< [in] Collection containing oid
1926 const ghobject_t
&oid
, ///< [in] Object containing omap
1927 bufferlist
*header
, ///< [out] omap header
1928 bool allow_eio
= false ///< [in] don't assert on eio
1930 virtual int omap_get_header(
1931 CollectionHandle
&c
, ///< [in] Collection containing oid
1932 const ghobject_t
&oid
, ///< [in] Object containing omap
1933 bufferlist
*header
, ///< [out] omap header
1934 bool allow_eio
= false ///< [in] don't assert on eio
1936 return omap_get_header(c
->get_cid(), oid
, header
, allow_eio
);
1939 /// Get keys defined on oid
1940 virtual int omap_get_keys(
1941 const coll_t
& c
, ///< [in] Collection containing oid
1942 const ghobject_t
&oid
, ///< [in] Object containing omap
1943 set
<string
> *keys
///< [out] Keys defined on oid
1945 virtual int omap_get_keys(
1946 CollectionHandle
&c
, ///< [in] Collection containing oid
1947 const ghobject_t
&oid
, ///< [in] Object containing omap
1948 set
<string
> *keys
///< [out] Keys defined on oid
1950 return omap_get_keys(c
->get_cid(), oid
, keys
);
1954 virtual int omap_get_values(
1955 const coll_t
& c
, ///< [in] Collection containing oid
1956 const ghobject_t
&oid
, ///< [in] Object containing omap
1957 const set
<string
> &keys
, ///< [in] Keys to get
1958 map
<string
, bufferlist
> *out
///< [out] Returned keys and values
1960 virtual int omap_get_values(
1961 CollectionHandle
&c
, ///< [in] Collection containing oid
1962 const ghobject_t
&oid
, ///< [in] Object containing omap
1963 const set
<string
> &keys
, ///< [in] Keys to get
1964 map
<string
, bufferlist
> *out
///< [out] Returned keys and values
1966 return omap_get_values(c
->get_cid(), oid
, keys
, out
);
1969 /// Filters keys into out which are defined on oid
1970 virtual int omap_check_keys(
1971 const coll_t
& c
, ///< [in] Collection containing oid
1972 const ghobject_t
&oid
, ///< [in] Object containing omap
1973 const set
<string
> &keys
, ///< [in] Keys to check
1974 set
<string
> *out
///< [out] Subset of keys defined on oid
1976 virtual int omap_check_keys(
1977 CollectionHandle
&c
, ///< [in] Collection containing oid
1978 const ghobject_t
&oid
, ///< [in] Object containing omap
1979 const set
<string
> &keys
, ///< [in] Keys to check
1980 set
<string
> *out
///< [out] Subset of keys defined on oid
1982 return omap_check_keys(c
->get_cid(), oid
, keys
, out
);
1986 * Returns an object map iterator
1988 * Warning! The returned iterator is an implicit lock on filestore
1989 * operations in c. Do not use filestore methods on c while the returned
1990 * iterator is live. (Filling in a transaction is no problem).
1992 * @return iterator, null on error
1994 virtual ObjectMap::ObjectMapIterator
get_omap_iterator(
1995 const coll_t
& c
, ///< [in] collection
1996 const ghobject_t
&oid
///< [in] object
1998 virtual ObjectMap::ObjectMapIterator
get_omap_iterator(
1999 CollectionHandle
&c
, ///< [in] collection
2000 const ghobject_t
&oid
///< [in] object
2002 return get_omap_iterator(c
->get_cid(), oid
);
2005 virtual int flush_journal() { return -EOPNOTSUPP
; }
2007 virtual int dump_journal(ostream
& out
) { return -EOPNOTSUPP
; }
2009 virtual int snapshot(const string
& name
) { return -EOPNOTSUPP
; }
2012 * Set and get internal fsid for this instance. No external data is modified
2014 virtual void set_fsid(uuid_d u
) = 0;
2015 virtual uuid_d
get_fsid() = 0;
2018 * Estimates additional disk space used by the specified amount of objects and caused by file allocation granularity and metadata store
2019 * - num objects - total (including witeouts) object count to measure used space for.
2021 virtual uint64_t estimate_objects_overhead(uint64_t num_objects
) = 0;
2025 virtual void inject_data_error(const ghobject_t
&oid
) {}
2026 virtual void inject_mdata_error(const ghobject_t
&oid
) {}
2028 virtual void compact() {}
2030 WRITE_CLASS_ENCODER(ObjectStore::Transaction
)
2031 WRITE_CLASS_ENCODER(ObjectStore::Transaction::TransactionData
)
2033 static inline void intrusive_ptr_add_ref(ObjectStore::Sequencer_impl
*s
) {
2036 static inline void intrusive_ptr_release(ObjectStore::Sequencer_impl
*s
) {
2040 ostream
& operator<<(ostream
& out
, const ObjectStore::Sequencer
& s
);
2041 ostream
& operator<<(ostream
& out
, const ObjectStore::Transaction
& tx
);