// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Allen Samuels <allen.samuels@sandisk.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef _CEPH_INCLUDE_MEMPOOL_H
#define _CEPH_INCLUDE_MEMPOOL_H

#include <cstddef>
#include <map>
#include <unordered_map>
#include <set>
#include <vector>
#include <list>
#include <mutex>
#include <typeinfo>
#include <boost/container/flat_set.hpp>
#include <boost/container/flat_map.hpp>

#include "common/Formatter.h"
#include "common/ceph_atomic.h"
#include "include/ceph_assert.h"
#include "include/compact_map.h"
#include "include/compact_set.h"


/*

Memory Pools
============

A memory pool is a method for accounting the memory consumption of a
set of containers.

Memory pools are statically declared (see pool_index_t).

Each memory pool tracks the number of bytes and items it contains.

Allocators can be declared and associated with a type so that they are
tracked independently of the pool total.  This additional accounting
is optional and only incurs an overhead if debugging is enabled at
runtime.  This allows developers to see what types are consuming the
pool resources.

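For example, a minimal sketch of turning on the optional per-type
accounting at runtime (set_debug_mode() and dump() are declared later
in this file):

  mempool::set_debug_mode(true);  // enables per-type tracking (extra overhead)
  ...
  mempool::dump(f);               // output now includes a by_type breakdown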

Declaring
---------

Using memory pools is very easy.

To create a new memory pool, simply add a new name into the list of
memory pools that's defined in "DEFINE_MEMORY_POOLS_HELPER".  That's
it. :)

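For example (a sketch using a hypothetical pool name), appending one
line to that list is all that is required:

  #define DEFINE_MEMORY_POOLS_HELPER(f) \
    f(bloom_filter)                     \
    ...                                 \
    f(my_new_pool)
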
For each memory pool that's created, a C++ namespace is also
automatically created (its name is the same as the one given to
DEFINE_MEMORY_POOLS_HELPER).  That namespace contains a set of common
STL containers that are predefined with the appropriate allocators.

Thus for mempool "osd" we have automatically available to us:

  mempool::osd::map
  mempool::osd::multimap
  mempool::osd::set
  mempool::osd::multiset
  mempool::osd::list
  mempool::osd::vector
  mempool::osd::unordered_map


Putting objects in a mempool
----------------------------

In order to use a memory pool with a particular type, a few additional
declarations are needed.

For a class:

  struct Foo {
    MEMPOOL_CLASS_HELPERS();
    ...
  };

Then, in an appropriate .cc file,

  MEMPOOL_DEFINE_OBJECT_FACTORY(Foo, foo, osd);

The second argument can generally be identical to the first, except
when the type contains a nested scope.  For example, for
BlueStore::Onode, we need to do

  MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Onode, bluestore_onode,
                                bluestore_meta);

(This is just because we need to name some static variables and we
can't use :: in a variable name.)

XXX Note: the new operator hard-codes the allocation size to the size of the
object given in MEMPOOL_DEFINE_OBJECT_FACTORY.  For this reason, you cannot
incorporate mempools into a base class without also defining a helper/factory
for the child class as well (as the base class is usually smaller than the
child class).

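For example, a sketch of that pitfall with hypothetical types; each
concrete class gets its own helpers and factory so that operator
new/delete account for the correct object size:

  struct Base {
    MEMPOOL_CLASS_HELPERS();          // factory sized for sizeof(Base)
    ...
  };
  struct Derived : public Base {
    MEMPOOL_CLASS_HELPERS();          // hides Base's operator new/delete
    char payload[256];
  };

  // in a .cc file:
  MEMPOOL_DEFINE_OBJECT_FACTORY(Base, base, unittest_1);
  MEMPOOL_DEFINE_OBJECT_FACTORY(Derived, derived, unittest_1);
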
In order to use the STL containers, simply use the namespaced variant
of the container type.  For example,

  mempool::osd::map<int,int> mymap;

Introspection
-------------

The simplest way to interrogate the process is with

  Formatter *f = ...
  mempool::dump(f);

This will dump information about *all* memory pools.  When debug mode
is enabled, the runtime complexity of dump is O(num_shards *
num_types).  When debug mode is disabled it is O(num_shards).

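For example, a minimal sketch of capturing that dump as JSON (assuming
ceph::JSONFormatter from common/Formatter.h):

  ceph::JSONFormatter jf(true);
  jf.open_object_section("mempools");
  mempool::dump(&jf);
  jf.close_section();
  jf.flush(std::cout);
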
You can also interrogate a specific pool programmatically with

  size_t bytes = mempool::unittest_2::allocated_bytes();
  size_t items = mempool::unittest_2::allocated_items();

The runtime complexity is O(num_shards).

Note that you cannot easily query per-type, primarily because debug
mode is optional and you should not rely on that information being
available.

*/

namespace mempool {

// --------------------------------------------------------------
// define memory pools

#define DEFINE_MEMORY_POOLS_HELPER(f) \
  f(bloom_filter)                     \
  f(bluestore_alloc)                  \
  f(bluestore_cache_data)             \
  f(bluestore_cache_onode)            \
  f(bluestore_cache_meta)             \
  f(bluestore_cache_other)            \
  f(bluestore_Buffer)                 \
  f(bluestore_Extent)                 \
  f(bluestore_Blob)                   \
  f(bluestore_SharedBlob)             \
  f(bluestore_inline_bl)              \
  f(bluestore_fsck)                   \
  f(bluestore_txc)                    \
  f(bluestore_writing_deferred)       \
  f(bluestore_writing)                \
  f(bluefs)                           \
  f(bluefs_file_reader)               \
  f(bluefs_file_writer)               \
  f(buffer_anon)                      \
  f(buffer_meta)                      \
  f(osd)                              \
  f(osd_mapbl)                        \
  f(osd_pglog)                        \
  f(osdmap)                           \
  f(osdmap_mapping)                   \
  f(pgmap)                            \
  f(mds_co)                           \
  f(unittest_1)                       \
  f(unittest_2)


// give them integer ids
#define P(x) mempool_##x,
enum pool_index_t {
  DEFINE_MEMORY_POOLS_HELPER(P)
  num_pools        // Must be last.
};
#undef P

extern bool debug_mode;
extern void set_debug_mode(bool d);

// --------------------------------------------------------------
class pool_t;

// we shard pool stats across many shard_t's to reduce the amount
// of cacheline ping pong.
enum {
  num_shard_bits = 5
};
enum {
  num_shards = 1 << num_shard_bits
};

// align shard to a cacheline
struct shard_t {
  ceph::atomic<size_t> bytes = {0};
  ceph::atomic<size_t> items = {0};
  char __padding[128 - sizeof(ceph::atomic<size_t>)*2];
} __attribute__ ((aligned (128)));

static_assert(sizeof(shard_t) == 128, "shard_t should be cacheline-sized");

struct stats_t {
  ssize_t items = 0;
  ssize_t bytes = 0;
  void dump(ceph::Formatter *f) const {
    f->dump_int("items", items);
    f->dump_int("bytes", bytes);
  }

  stats_t& operator+=(const stats_t& o) {
    items += o.items;
    bytes += o.bytes;
    return *this;
  }
};

pool_t& get_pool(pool_index_t ix);
const char *get_pool_name(pool_index_t ix);

struct type_t {
  const char *type_name;
  size_t item_size;
  ceph::atomic<ssize_t> items = {0};  // signed
};

struct type_info_hash {
  std::size_t operator()(const std::type_info& k) const {
    return k.hash_code();
  }
};

class pool_t {
  shard_t shard[num_shards];

  mutable std::mutex lock;  // only used for types list
  std::unordered_map<const char *, type_t> type_map;

public:
  //
  // How much this pool consumes. O(<num_shards>)
  //
  size_t allocated_bytes() const;
  size_t allocated_items() const;

  void adjust_count(ssize_t items, ssize_t bytes);

  shard_t* pick_a_shard() {
    // Dirt cheap, see:
    //   http://fossies.org/dox/glibc-2.24/pthread__self_8c_source.html
    size_t me = (size_t)pthread_self();
    size_t i = (me >> 3) & ((1 << num_shard_bits) - 1);
    return &shard[i];
  }

  type_t *get_type(const std::type_info& ti, size_t size) {
    std::lock_guard<std::mutex> l(lock);
    auto p = type_map.find(ti.name());
    if (p != type_map.end()) {
      return &p->second;
    }
    type_t &t = type_map[ti.name()];
    t.type_name = ti.name();
    t.item_size = size;
    return &t;
  }

  // get pool stats.  by_type is not populated if !debug
  void get_stats(stats_t *total,
                 std::map<std::string, stats_t> *by_type) const;

  void dump(ceph::Formatter *f, stats_t *ptotal=0) const;
};

void dump(ceph::Formatter *f);


// STL allocator for use with containers.  The accounting state lives in
// the shared pool_t (and its per-type map), so allocator instances are
// cheap to construct and copy and need not be passed to container
// constructors.

template<pool_index_t pool_ix, typename T>
class pool_allocator {
  pool_t *pool;
  type_t *type = nullptr;

public:
  typedef pool_allocator<pool_ix, T> allocator_type;
  typedef T value_type;
  typedef value_type *pointer;
  typedef const value_type * const_pointer;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  template<typename U> struct rebind {
    typedef pool_allocator<pool_ix,U> other;
  };

  void init(bool force_register) {
    pool = &get_pool(pool_ix);
    if (debug_mode || force_register) {
      type = pool->get_type(typeid(T), sizeof(T));
    }
  }

  pool_allocator(bool force_register=false) {
    init(force_register);
  }
  template<typename U>
  pool_allocator(const pool_allocator<pool_ix,U>&) {
    init(false);
  }

  T* allocate(size_t n, void *p = nullptr) {
    size_t total = sizeof(T) * n;
    shard_t *shard = pool->pick_a_shard();
    shard->bytes += total;
    shard->items += n;
    if (type) {
      type->items += n;
    }
    T* r = reinterpret_cast<T*>(new char[total]);
    return r;
  }

  void deallocate(T* p, size_t n) {
    size_t total = sizeof(T) * n;
    shard_t *shard = pool->pick_a_shard();
    shard->bytes -= total;
    shard->items -= n;
    if (type) {
      type->items -= n;
    }
    delete[] reinterpret_cast<char*>(p);
  }

  T* allocate_aligned(size_t n, size_t align, void *p = nullptr) {
    size_t total = sizeof(T) * n;
    shard_t *shard = pool->pick_a_shard();
    shard->bytes += total;
    shard->items += n;
    if (type) {
      type->items += n;
    }
    char *ptr;
    int rc = ::posix_memalign((void**)(void*)&ptr, align, total);
    if (rc)
      throw std::bad_alloc();
    T* r = reinterpret_cast<T*>(ptr);
    return r;
  }

  void deallocate_aligned(T* p, size_t n) {
    size_t total = sizeof(T) * n;
    shard_t *shard = pool->pick_a_shard();
    shard->bytes -= total;
    shard->items -= n;
    if (type) {
      type->items -= n;
    }
    ::free(p);
  }

  void destroy(T* p) {
    p->~T();
  }

  template<class U>
  void destroy(U *p) {
    p->~U();
  }

  void construct(T* p, const T& val) {
    ::new ((void *)p) T(val);
  }

  template<class U, class... Args> void construct(U* p, Args&&... args) {
    ::new((void *)p) U(std::forward<Args>(args)...);
  }

  bool operator==(const pool_allocator&) const { return true; }
  bool operator!=(const pool_allocator&) const { return false; }
};

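// For example (a sketch): the allocator can also be used directly with a
// standard container, in which case its allocations are charged to the
// named pool:
//
//   std::vector<int, mempool::pool_allocator<mempool::mempool_unittest_1, int>> v;
//   v.resize(1024);  // 1024 * sizeof(int) bytes accounted to unittest_1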

// Per-pool namespaces (one per entry in DEFINE_MEMORY_POOLS_HELPER),
// each providing pool-aware container aliases.

#define P(x)                                                              \
  namespace x {                                                           \
    static const mempool::pool_index_t id = mempool::mempool_##x;        \
    template<typename v>                                                  \
    using pool_allocator = mempool::pool_allocator<id,v>;                 \
                                                                          \
    using string = std::basic_string<char,std::char_traits<char>,         \
                                     pool_allocator<char>>;               \
                                                                          \
    template<typename k,typename v, typename cmp = std::less<k> >         \
    using map = std::map<k, v, cmp,                                       \
                         pool_allocator<std::pair<const k,v>>>;           \
                                                                          \
    template<typename k,typename v, typename cmp = std::less<k> >         \
    using compact_map = compact_map<k, v, cmp,                            \
                                    pool_allocator<std::pair<const k,v>>>; \
                                                                          \
    template<typename k,typename v, typename cmp = std::less<k> >         \
    using compact_multimap = compact_multimap<k, v, cmp,                  \
                                              pool_allocator<std::pair<const k,v>>>; \
                                                                          \
    template<typename k, typename cmp = std::less<k> >                    \
    using compact_set = compact_set<k, cmp, pool_allocator<k>>;           \
                                                                          \
    template<typename k,typename v, typename cmp = std::less<k> >         \
    using multimap = std::multimap<k,v,cmp,                               \
                                   pool_allocator<std::pair<const k,      \
                                                            v>>>;         \
                                                                          \
    template<typename k, typename cmp = std::less<k> >                    \
    using set = std::set<k,cmp,pool_allocator<k>>;                        \
                                                                          \
    template<typename k, typename cmp = std::less<k> >                    \
    using flat_set = boost::container::flat_set<k,cmp,pool_allocator<k>>; \
                                                                          \
    template<typename k, typename v, typename cmp = std::less<k> >        \
    using flat_map = boost::container::flat_map<k,v,cmp,                  \
                                                pool_allocator<std::pair<k,v>>>; \
                                                                          \
    template<typename v>                                                  \
    using list = std::list<v,pool_allocator<v>>;                          \
                                                                          \
    template<typename v>                                                  \
    using vector = std::vector<v,pool_allocator<v>>;                      \
                                                                          \
    template<typename k, typename v,                                      \
             typename h=std::hash<k>,                                     \
             typename eq = std::equal_to<k>>                              \
    using unordered_map =                                                 \
      std::unordered_map<k,v,h,eq,pool_allocator<std::pair<const k,v>>>;  \
                                                                          \
    inline size_t allocated_bytes() {                                     \
      return mempool::get_pool(id).allocated_bytes();                     \
    }                                                                     \
    inline size_t allocated_items() {                                     \
      return mempool::get_pool(id).allocated_items();                     \
    }                                                                     \
  };

DEFINE_MEMORY_POOLS_HELPER(P)

#undef P

};

// The elements allocated by mempool are in the same memory space as the
// ones allocated by the default allocator, so we can compare them
// efficiently: libstdc++'s std::equal is specialized to use memcmp if T
// is an integer or pointer type.  This is good enough for our use case.
// Use std::is_trivially_copyable<T> to expand the support to more types
// if necessary.
template<typename T, mempool::pool_index_t pool_index>
bool operator==(const std::vector<T, std::allocator<T>>& lhs,
                const std::vector<T, mempool::pool_allocator<pool_index, T>>& rhs)
{
  return (lhs.size() == rhs.size() &&
          std::equal(lhs.begin(), lhs.end(), rhs.begin()));
}

template<typename T, mempool::pool_index_t pool_index>
bool operator!=(const std::vector<T, std::allocator<T>>& lhs,
                const std::vector<T, mempool::pool_allocator<pool_index, T>>& rhs)
{
  return !(lhs == rhs);
}

template<typename T, mempool::pool_index_t pool_index>
bool operator==(const std::vector<T, mempool::pool_allocator<pool_index, T>>& lhs,
                const std::vector<T, std::allocator<T>>& rhs)
{
  return rhs == lhs;
}

template<typename T, mempool::pool_index_t pool_index>
bool operator!=(const std::vector<T, mempool::pool_allocator<pool_index, T>>& lhs,
                const std::vector<T, std::allocator<T>>& rhs)
{
  return !(lhs == rhs);
}

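// For example (a sketch): a mempool-backed vector compares equal to a
// plain std::vector holding the same elements.
//
//   std::vector<int> a = {1, 2, 3};
//   mempool::osd::vector<int> b(a.begin(), a.end());
//   ceph_assert(a == b && b == a);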

// Use this for any type that is contained by a container (unless it
// is a class you defined; see below).
#define MEMPOOL_DECLARE_FACTORY(obj, factoryname, pool)           \
  namespace mempool {                                             \
    namespace pool {                                              \
      extern pool_allocator<obj> alloc_##factoryname;             \
    }                                                             \
  }

#define MEMPOOL_DEFINE_FACTORY(obj, factoryname, pool)            \
  namespace mempool {                                             \
    namespace pool {                                              \
      pool_allocator<obj> alloc_##factoryname = {true};           \
    }                                                             \
  }

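// For example (a sketch with a hypothetical factory name):
//
//   // in a header:
//   MEMPOOL_DECLARE_FACTORY(int64_t, int64, osdmap_mapping);
//   // in exactly one .cc file:
//   MEMPOOL_DEFINE_FACTORY(int64_t, int64, osdmap_mapping);
//
// after which mempool::osdmap_mapping::alloc_int64 is an allocator whose
// allocations are charged to the osdmap_mapping pool.
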
// Use this for each class that belongs to a mempool.  For example,
//
//   class T {
//     MEMPOOL_CLASS_HELPERS();
//     ...
//   };
//
#define MEMPOOL_CLASS_HELPERS()                                    \
  void *operator new(size_t size);                                 \
  void *operator new[](size_t size) noexcept {                     \
    ceph_abort_msg("no array new");                                \
    return nullptr; }                                              \
  void operator delete(void *);                                    \
  void operator delete[](void *) { ceph_abort_msg("no array delete"); }


// Use this in some particular .cc file to match each class with a
// MEMPOOL_CLASS_HELPERS().
#define MEMPOOL_DEFINE_OBJECT_FACTORY(obj, factoryname, pool)      \
  MEMPOOL_DEFINE_FACTORY(obj, factoryname, pool)                   \
  void *obj::operator new(size_t size) {                           \
    return mempool::pool::alloc_##factoryname.allocate(1);         \
  }                                                                \
  void obj::operator delete(void *p) {                             \
    return mempool::pool::alloc_##factoryname.deallocate((obj*)p, 1); \
  }
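
// For example (a sketch with a hypothetical class):
//
//   // foo.h
//   struct Foo {
//     MEMPOOL_CLASS_HELPERS();
//     int value;
//   };
//
//   // foo.cc
//   MEMPOOL_DEFINE_OBJECT_FACTORY(Foo, foo, unittest_1);
//
// after which `new Foo` and `delete` are accounted to mempool::unittest_1.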

#endif