]> git.proxmox.com Git - ceph.git/blob - ceph/src/rocksdb/util/cleanable.cc
update ceph source to reef 18.1.2
[ceph.git] / ceph / src / rocksdb / util / cleanable.cc
1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
10 #include "rocksdb/cleanable.h"
11
12 #include <atomic>
13 #include <cassert>
14 #include <utility>
15
16 namespace ROCKSDB_NAMESPACE {
17
18 Cleanable::Cleanable() {
19 cleanup_.function = nullptr;
20 cleanup_.next = nullptr;
21 }
22
23 Cleanable::~Cleanable() { DoCleanup(); }
24
25 Cleanable::Cleanable(Cleanable&& other) noexcept { *this = std::move(other); }
26
// Move-assignment: steals other's cleanup list and leaves other empty.
Cleanable& Cleanable::operator=(Cleanable&& other) noexcept {
  assert(this != &other);  // https://stackoverflow.com/a/9322542/454544
  // NOTE(review): any cleanups already registered on *this are overwritten
  // here without being run. The move constructor relies on this assign-only
  // behavior (it delegates with *this uninitialized), so callers should only
  // move into an empty or freshly constructed Cleanable -- confirm at call
  // sites before moving into one that holds pending cleanups.
  cleanup_ = other.cleanup_;
  other.cleanup_.function = nullptr;
  other.cleanup_.next = nullptr;
  return *this;
}
34
// If the entire linked list were on the heap we could simply attach one
// linked list to another. However, the head is an embedded object, to avoid
// the cost of allocating nodes in the most common use case where a Cleanable
// has only one Cleanup to do. We could put everything on the heap if
// benchmarks show no negative impact on performance.
// Also, we need to iterate over the linked list since there is no pointer to
// the tail. We could add a tail pointer, but maintaining it might negatively
// impact performance in the common case of one cleanup, where the tail
// pointer is not needed. Again, benchmarks could clarify that.
// Even without a tail pointer we could iterate over the list, find the tail,
// and have only that node updated, without the need to insert the Cleanups
// one by one. This would be redundant, however, when the source Cleanable has
// only one or a few Cleanups, which is the case most of the time.
// TODO(myabandeh): if the list is too long we should maintain a tail pointer
// and have the entire list (minus the head, which has to be inserted
// separately) merged with the target linked list at once.
51 void Cleanable::DelegateCleanupsTo(Cleanable* other) {
52 assert(other != nullptr);
53 if (cleanup_.function == nullptr) {
54 return;
55 }
56 Cleanup* c = &cleanup_;
57 other->RegisterCleanup(c->function, c->arg1, c->arg2);
58 c = c->next;
59 while (c != nullptr) {
60 Cleanup* next = c->next;
61 other->RegisterCleanup(c);
62 c = next;
63 }
64 cleanup_.function = nullptr;
65 cleanup_.next = nullptr;
66 }
67
68 void Cleanable::RegisterCleanup(Cleanable::Cleanup* c) {
69 assert(c != nullptr);
70 if (cleanup_.function == nullptr) {
71 cleanup_.function = c->function;
72 cleanup_.arg1 = c->arg1;
73 cleanup_.arg2 = c->arg2;
74 delete c;
75 } else {
76 c->next = cleanup_.next;
77 cleanup_.next = c;
78 }
79 }
80
81 void Cleanable::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
82 assert(func != nullptr);
83 Cleanup* c;
84 if (cleanup_.function == nullptr) {
85 c = &cleanup_;
86 } else {
87 c = new Cleanup;
88 c->next = cleanup_.next;
89 cleanup_.next = c;
90 }
91 c->function = func;
92 c->arg1 = arg1;
93 c->arg2 = arg2;
94 }
95
96 struct SharedCleanablePtr::Impl : public Cleanable {
97 std::atomic<unsigned> ref_count{1}; // Start with 1 ref
98 void Ref() { ref_count.fetch_add(1, std::memory_order_relaxed); }
99 void Unref() {
100 if (ref_count.fetch_sub(1, std::memory_order_relaxed) == 1) {
101 // Last ref
102 delete this;
103 }
104 }
105 static void UnrefWrapper(void* arg1, void* /*arg2*/) {
106 static_cast<SharedCleanablePtr::Impl*>(arg1)->Unref();
107 }
108 };
109
110 void SharedCleanablePtr::Reset() {
111 if (ptr_) {
112 ptr_->Unref();
113 ptr_ = nullptr;
114 }
115 }
116
117 void SharedCleanablePtr::Allocate() {
118 Reset();
119 ptr_ = new Impl();
120 }
121
// Copy constructor: delegates to copy-assignment, which bumps the shared
// refcount. Relies on ptr_ having a default member initializer (nullptr) so
// the Reset() inside operator= is safe on a brand-new object --
// NOTE(review): confirm the initializer in rocksdb/cleanable.h.
SharedCleanablePtr::SharedCleanablePtr(const SharedCleanablePtr& from) {
  *this = from;
}
125
// Move constructor: delegates to move-assignment, which steals from.ptr_
// without touching the refcount and leaves `from` empty.
SharedCleanablePtr::SharedCleanablePtr(SharedCleanablePtr&& from) noexcept {
  *this = std::move(from);
}
129
130 SharedCleanablePtr& SharedCleanablePtr::operator=(
131 const SharedCleanablePtr& from) {
132 if (this != &from) {
133 Reset();
134 ptr_ = from.ptr_;
135 if (ptr_) {
136 ptr_->Ref();
137 }
138 }
139 return *this;
140 }
141
142 SharedCleanablePtr& SharedCleanablePtr::operator=(
143 SharedCleanablePtr&& from) noexcept {
144 assert(this != &from); // https://stackoverflow.com/a/9322542/454544
145 Reset();
146 ptr_ = from.ptr_;
147 from.ptr_ = nullptr;
148 return *this;
149 }
150
151 SharedCleanablePtr::~SharedCleanablePtr() { Reset(); }
152
// Dereference to the shared Cleanable. Precondition: non-empty
// (ptr_ != nullptr), like dereferencing any smart pointer.
Cleanable& SharedCleanablePtr::operator*() {
  return *ptr_;  // implicit upcast
}
156
// Member access on the shared Cleanable. Precondition: non-empty
// (ptr_ != nullptr).
Cleanable* SharedCleanablePtr::operator->() {
  return ptr_;  // implicit upcast
}
160
// Raw (non-owning) pointer to the shared Cleanable; nullptr when empty.
Cleanable* SharedCleanablePtr::get() {
  return ptr_;  // implicit upcast
}
164
165 void SharedCleanablePtr::RegisterCopyWith(Cleanable* target) {
166 if (ptr_) {
167 // "Virtual" copy of the pointer
168 ptr_->Ref();
169 target->RegisterCleanup(&Impl::UnrefWrapper, ptr_, nullptr);
170 }
171 }
172
173 void SharedCleanablePtr::MoveAsCleanupTo(Cleanable* target) {
174 if (ptr_) {
175 // "Virtual" move of the pointer
176 target->RegisterCleanup(&Impl::UnrefWrapper, ptr_, nullptr);
177 ptr_ = nullptr;
178 }
179 }
180
181 } // namespace ROCKSDB_NAMESPACE