// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator class. For a small request, it
// allocates from a block of pre-defined block size. For a large request, it
// uses malloc to obtain exactly the requested size.
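//
// Example usage (an illustrative sketch; the sizes are arbitrary):
//
//   rocksdb::Arena arena;                        // blocks of kMinBlockSize
//   char* buf = arena.Allocate(64);              // small, unaligned request
//   char* aligned = arena.AllocateAligned(256);  // aligned request
//   size_t usage = arena.ApproximateMemoryUsage();
//
// There is no per-allocation free; all memory is released when the arena is
// destroyed.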

#pragma once
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include <cstddef>
#include <cerrno>
#include <vector>
#include <assert.h>
#include <stdint.h>
#include "util/allocator.h"
#include "util/mutexlock.h"

namespace rocksdb {

class Arena : public Allocator {
 public:
  // No copying allowed
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;

  static const size_t kInlineSize = 2048;
  static const size_t kMinBlockSize;
  static const size_t kMaxBlockSize;

  // huge_page_size: if 0, don't use huge page TLB. If > 0 (it should be set
  // to the system's supported hugepage size), block allocation will try the
  // huge page TLB first and fall back to the normal case if that fails.
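  //
  // Example (an illustrative sketch; 2MB is only an assumed system hugepage
  // size, and huge pages must be reserved beforehand, see AllocateAligned()
  // below):
  //   Arena arena(1024 * 1024 /* block_size */, 2 * 1024 * 1024 /* huge_page_size */);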
  explicit Arena(size_t block_size = kMinBlockSize, size_t huge_page_size = 0);
  ~Arena();

  char* Allocate(size_t bytes) override;

  // huge_page_size: if > 0, the allocation will first be attempted from the
  // huge page TLB. The argument is the huge page size to use. Bytes will be
  // rounded up to a multiple of that page size and allocated through mmap
  // with the anonymous and huge page options; the extra space allocated is
  // wasted. If the allocation fails, it falls back to the normal case. To
  // enable this, huge pages need to be reserved beforehand, e.g.:
  //     sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Huge page allocation can fail, in which case it falls back to the normal
  // case and the error messages are logged to logger. So when calling with
  // huge_page_size > 0, we highly recommend that a logger is passed in.
  // Otherwise, the error message will be printed out to stderr directly.
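  //
  // Example (an illustrative sketch; it assumes 2MB huge pages have been
  // reserved as above and that `info_log` is an existing Logger*):
  //   char* buf = arena.AllocateAligned(1 << 20, 2 * 1024 * 1024, info_log);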
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;

  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding the space allocated but not yet used for future
  // allocations).
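  // The estimate is the bytes handed out in blocks so far, plus the
  // bookkeeping vector, minus the unused tail of the currently active block
  // (compare MemoryAllocatedBytes() and AllocatedAndUnused() below).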
  size_t ApproximateMemoryUsage() const {
    return blocks_memory_ + blocks_.capacity() * sizeof(char*) -
           alloc_bytes_remaining_;
  }

  size_t MemoryAllocatedBytes() const { return blocks_memory_; }

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }

  // If an allocation is too big, we'll allocate an irregular block with the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num; }

  size_t BlockSize() const override { return kBlockSize; }

 private:
  char inline_block_[kInlineSize] __attribute__((__aligned__(sizeof(void*))));
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Array of new[] allocated memory blocks
  typedef std::vector<char*> Blocks;
  Blocks blocks_;

  struct MmapInfo {
    void* addr_;
    size_t length_;

    MmapInfo(void* addr, size_t length) : addr_(addr), length_(length) {}
  };
  std::vector<MmapInfo> huge_blocks_;
  size_t irregular_block_num = 0;

  // Stats for the current active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end; allocating both types of
  // memory from the same direction would waste more memory on alignment
  // padding.
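  //
  // Layout of the active block (an illustrative sketch of the scheme above):
  //
  //   low address                                         high address
  //   [ aligned chunks -->  .......free.......  <-- unaligned chunks ]
  //     ^ aligned_alloc_ptr_                    ^ unaligned_alloc_ptr_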
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;

#ifdef MAP_HUGETLB
  size_t hugetlb_size_ = 0;
#endif  // MAP_HUGETLB
  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;
};

inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
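    // The request fits in the active block: carve it from the high end by
    // bumping the unaligned pointer down.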
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);
}

// Checks and adjusts block_size so that the returned value is
// 1. in the range [kMinBlockSize, kMaxBlockSize], and
// 2. a multiple of the alignment unit.
extern size_t OptimizeBlockSize(size_t block_size);
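//
// Example (an illustrative sketch; `options.arena_block_size` stands in for
// any user-provided block size):
//   Arena arena(OptimizeBlockSize(options.arena_block_size));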

}  // namespace rocksdb