//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator class. For a small request,
// it hands out space from a pre-allocated block of a fixed block size. For a
// large request, it uses malloc to obtain exactly the requested size.
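//
// A minimal usage sketch (illustrative only; `arena`, `small`, and `big` are
// hypothetical local names, not part of this header):
//
//   Arena arena;                           // kMinBlockSize blocks by default
//   char* small = arena.Allocate(64);      // carved out of the current block
//   char* big = arena.Allocate(10 << 20);  // too big: one irregular block
//   // There is no per-allocation Free(); everything is released when
//   // `arena` is destroyed.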

#pragma once
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include <cassert>
#include <cstddef>
#include <vector>

#include "memory/allocator.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

class Arena : public Allocator {
 public:
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;

  static const size_t kInlineSize = 2048;
  static const size_t kMinBlockSize;
  static const size_t kMaxBlockSize;

  // huge_page_size: if 0, don't use huge page TLB. If > 0 (it should be set
  // to a hugepage size supported by the system), block allocation will try
  // huge page TLB first and fall back to the normal case if that allocation
  // fails.
  explicit Arena(size_t block_size = kMinBlockSize,
                 AllocTracker* tracker = nullptr, size_t huge_page_size = 0);
  ~Arena();

  char* Allocate(size_t bytes) override;

  // huge_page_size: if > 0, will try to allocate from huge page TLB.
  // The argument is the page size of the huge page TLB. Bytes will be
  // rounded up to a multiple of that page size and allocated through mmap
  // with the anonymous and huge-page options; the extra space allocated is
  // wasted. If the allocation fails, it falls back to the normal case. To
  // enable it, huge pages need to be reserved first, e.g.:
  //     sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Because huge page allocation can fail and fall back to the normal case,
  // we highly recommend passing in a logger when calling with
  // huge_page_size > 0, so that the messages are logged to it.
  // Otherwise, the error message is printed directly to stderr.
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;
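
  // Illustrative example (hypothetical names; `info_log` stands for some
  // Logger* owned by the caller): allocate 1 MB backed by 2 MB huge pages,
  // with a logger so fallback messages are not written to stderr:
  //
  //   char* buf = arena.AllocateAligned(1 << 20, 2 * 1024 * 1024, info_log);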

  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding the space allocated but not yet used for future
  // allocations).
  size_t ApproximateMemoryUsage() const {
    return blocks_memory_ + blocks_.capacity() * sizeof(char*) -
           alloc_bytes_remaining_;
  }
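
  // Worked example with illustrative numbers: two 4096-byte blocks allocated
  // (blocks_memory_ == 8192), blocks_.capacity() == 4 on a 64-bit system
  // (4 * 8 == 32 bytes of pointer storage), and 1000 bytes still unused in
  // the active block give an estimate of 8192 + 32 - 1000 == 7224 bytes.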

  size_t MemoryAllocatedBytes() const { return blocks_memory_; }

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }

  // If an allocation is too big, we'll allocate an irregular block of the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num; }

  size_t BlockSize() const override { return kBlockSize; }

  bool IsInInlineBlock() const {
    return blocks_.empty() && huge_blocks_.empty();
  }
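
  // Illustrative example (assumes a freshly constructed arena named `a`):
  //
  //   Arena a;
  //   assert(a.IsInInlineBlock());  // nothing heap-allocated yet
  //   a.Allocate(16);               // served from inline_block_ below
  //   assert(a.IsInInlineBlock());  // still inside the 2048-byte inline block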

 private:
  char inline_block_[kInlineSize] __attribute__((__aligned__(alignof(max_align_t))));
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Array of new[] allocated memory blocks
  using Blocks = std::vector<char*>;
  Blocks blocks_;

  struct MmapInfo {
    void* addr_;
    size_t length_;

    MmapInfo(void* addr, size_t length) : addr_(addr), length_(length) {}
  };
  std::vector<MmapInfo> huge_blocks_;
  size_t irregular_block_num = 0;

  // Stats for the current active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end. Otherwise, the memory
  // wasted on alignment would be higher if we allocated both types of
  // memory from one direction.
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;
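
  // Illustrative layout of the active block (addresses grow to the right);
  // the free gap between the two pointers is alloc_bytes_remaining_:
  //
  //   [ aligned chunks --> |      free gap      | <-- unaligned chunks ]
  //     ^                  ^                    ^
  //     block start        aligned_alloc_ptr_   unaligned_alloc_ptr_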

#ifdef MAP_HUGETLB
  size_t hugetlb_size_ = 0;
#endif  // MAP_HUGETLB

  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;
  AllocTracker* tracker_;
};

inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);
}
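
// AllocateFallback is defined in arena.cc. A sketch of the decision it makes,
// consistent with the IrregularBlockNum() comment above (the one-quarter
// threshold is an assumption for illustration, not a documented contract):
//
//   if (bytes > kBlockSize / 4) {
//     ++irregular_block_num;
//     return AllocateNewBlock(bytes);  // dedicated block of exactly `bytes`
//   }
//   // otherwise: start a fresh kBlockSize block (wasting the remainder of
//   // the old one) and serve this and later requests from it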

// Check and adjust the block_size so that the return value is
// 1. in the range of [kMinBlockSize, kMaxBlockSize], and
// 2. a multiple of the align unit.
extern size_t OptimizeBlockSize(size_t block_size);
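
// A sketch of that contract (the real definition lives in arena.cc;
// `kAlignUnit` is a stand-in name for the implementation's alignment unit):
//
//   size_t OptimizeBlockSize(size_t block_size) {
//     block_size = std::max(Arena::kMinBlockSize, block_size);
//     block_size = std::min(Arena::kMaxBlockSize, block_size);
//     if (block_size % kAlignUnit != 0) {
//       block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
//     }
//     return block_size;
//   }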

}  // namespace ROCKSDB_NAMESPACE