// ceph/src/include/mempool.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2016 Allen Samuels <allen.samuels@sandisk.com>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #ifndef _CEPH_INCLUDE_MEMPOOL_H
16 #define _CEPH_INCLUDE_MEMPOOL_H
17
18 #include <cstddef>
19 #include <map>
20 #include <unordered_map>
21 #include <set>
22 #include <vector>
23 #include <list>
24 #include <mutex>
25 #include <atomic>
26 #include <typeinfo>
27 #include <boost/container/flat_set.hpp>
28 #include <boost/container/flat_map.hpp>
29
30 #include <common/Formatter.h>
31 #include "include/ceph_assert.h"
32 #include "include/compact_map.h"
33 #include "include/compact_set.h"
34
35
36 /*
37
38 Memory Pools
39 ============
40
41 A memory pool is a method for accounting the consumption of memory of
42 a set of containers.
43
44 Memory pools are statically declared (see pool_index_t).
45
46 Each memory pool tracks the number of bytes and items it contains.
47
48 Allocators can be declared and associated with a type so that they are
49 tracked independently of the pool total. This additional accounting
50 is optional and only incurs an overhead if the debugging is enabled at
51 runtime. This allows developers to see what types are consuming the
52 pool resources.
53
54
55 Declaring
56 ---------
57
58 Using memory pools is very easy.
59
60 To create a new memory pool, simply add a new name into the list of
61 memory pools that's defined in "DEFINE_MEMORY_POOLS_HELPER". That's
62 it. :)
63
64 For each memory pool that's created a C++ namespace is also
65 automatically created (name is same as in DEFINE_MEMORY_POOLS_HELPER).
66 That namespace contains a set of common STL containers that are predefined
67 with the appropriate allocators.
68
69 Thus for mempool "osd" we have automatically available to us:
70
71 mempool::osd::map
72 mempool::osd::multimap
73 mempool::osd::set
74 mempool::osd::multiset
75 mempool::osd::list
76 mempool::osd::vector
77 mempool::osd::unordered_map
78
79
80 Putting objects in a mempool
81 ----------------------------
82
83 In order to use a memory pool with a particular type, a few additional
84 declarations are needed.
85
86 For a class:
87
88 struct Foo {
89 MEMPOOL_CLASS_HELPERS();
90 ...
91 };
92
93 Then, in an appropriate .cc file,
94
95 MEMPOOL_DEFINE_OBJECT_FACTORY(Foo, foo, osd);
96
97 The second argument can generally be identical to the first, except
98 when the type contains a nested scope. For example, for
99 BlueStore::Onode, we need to do
100
101 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Onode, bluestore_onode,
102 bluestore_meta);
103
104 (This is just because we need to name some static variables and we
105 can't use :: in a variable name.)
106
107 XXX Note: the new operator hard-codes the allocation size to the size of the
108 object given in MEMPOOL_DEFINE_OBJECT_FACTORY. For this reason, you cannot
109 incorporate mempools into a base class without also defining a helper/factory
110 for the child class as well (as the base class is usually smaller than the
111 child class).
112
113 In order to use the STL containers, simply use the namespaced variant
114 of the container type. For example,
115
  mempool::osd::vector<int> myvec;
117
118 Introspection
119 -------------
120
121 The simplest way to interrogate the process is with
122
  Formatter *f = ...
124 mempool::dump(f);
125
126 This will dump information about *all* memory pools. When debug mode
127 is enabled, the runtime complexity of dump is O(num_shards *
num_types). When debug mode is disabled it is O(num_shards).
129
130 You can also interrogate a specific pool programmatically with
131
132 size_t bytes = mempool::unittest_2::allocated_bytes();
133 size_t items = mempool::unittest_2::allocated_items();
134
135 The runtime complexity is O(num_shards).
136
137 Note that you cannot easily query per-type, primarily because debug
138 mode is optional and you should not rely on that information being
139 available.
140
141 */
142
143 namespace mempool {
144
145 // --------------------------------------------------------------
146 // define memory pools
147
148 #define DEFINE_MEMORY_POOLS_HELPER(f) \
149 f(bloom_filter) \
150 f(bluestore_alloc) \
151 f(bluestore_cache_data) \
152 f(bluestore_cache_onode) \
153 f(bluestore_cache_other) \
154 f(bluestore_fsck) \
155 f(bluestore_txc) \
156 f(bluestore_writing_deferred) \
157 f(bluestore_writing) \
158 f(bluefs) \
159 f(buffer_anon) \
160 f(buffer_meta) \
161 f(osd) \
162 f(osd_mapbl) \
163 f(osd_pglog) \
164 f(osdmap) \
165 f(osdmap_mapping) \
166 f(pgmap) \
167 f(mds_co) \
168 f(unittest_1) \
169 f(unittest_2)
170
171
172 // give them integer ids
173 #define P(x) mempool_##x,
174 enum pool_index_t {
175 DEFINE_MEMORY_POOLS_HELPER(P)
176 num_pools // Must be last.
177 };
178 #undef P
179
180 extern bool debug_mode;
181 extern void set_debug_mode(bool d);
182
183 // --------------------------------------------------------------
184 class pool_t;
185
186 // we shard pool stats across many shard_t's to reduce the amount
187 // of cacheline ping pong.
188 enum {
189 num_shard_bits = 5
190 };
191 enum {
192 num_shards = 1 << num_shard_bits
193 };
194
195 // align shard to a cacheline
196 struct shard_t {
197 std::atomic<size_t> bytes = {0};
198 std::atomic<size_t> items = {0};
199 char __padding[128 - sizeof(std::atomic<size_t>)*2];
200 } __attribute__ ((aligned (128)));
201
202 static_assert(sizeof(shard_t) == 128, "shard_t should be cacheline-sized");
203
204 struct stats_t {
205 ssize_t items = 0;
206 ssize_t bytes = 0;
207 void dump(ceph::Formatter *f) const {
208 f->dump_int("items", items);
209 f->dump_int("bytes", bytes);
210 }
211
212 stats_t& operator+=(const stats_t& o) {
213 items += o.items;
214 bytes += o.bytes;
215 return *this;
216 }
217 };
218
219 pool_t& get_pool(pool_index_t ix);
220 const char *get_pool_name(pool_index_t ix);
221
222 struct type_t {
223 const char *type_name;
224 size_t item_size;
225 std::atomic<ssize_t> items = {0}; // signed
226 };
227
228 struct type_info_hash {
229 std::size_t operator()(const std::type_info& k) const {
230 return k.hash_code();
231 }
232 };
233
234 class pool_t {
235 shard_t shard[num_shards];
236
237 mutable std::mutex lock; // only used for types list
238 std::unordered_map<const char *, type_t> type_map;
239
240 public:
241 //
242 // How much this pool consumes. O(<num_shards>)
243 //
244 size_t allocated_bytes() const;
245 size_t allocated_items() const;
246
247 void adjust_count(ssize_t items, ssize_t bytes);
248
249 shard_t* pick_a_shard() {
250 // Dirt cheap, see:
251 // http://fossies.org/dox/glibc-2.24/pthread__self_8c_source.html
252 size_t me = (size_t)pthread_self();
253 size_t i = (me >> 3) & ((1 << num_shard_bits) - 1);
254 return &shard[i];
255 }
256
257 type_t *get_type(const std::type_info& ti, size_t size) {
258 std::lock_guard<std::mutex> l(lock);
259 auto p = type_map.find(ti.name());
260 if (p != type_map.end()) {
261 return &p->second;
262 }
263 type_t &t = type_map[ti.name()];
264 t.type_name = ti.name();
265 t.item_size = size;
266 return &t;
267 }
268
269 // get pool stats. by_type is not populated if !debug
270 void get_stats(stats_t *total,
271 std::map<std::string, stats_t> *by_type) const;
272
273 void dump(ceph::Formatter *f, stats_t *ptotal=0) const;
274 };
275
276 void dump(ceph::Formatter *f);
277
278
279 // STL allocator for use with containers. All actual state
280 // is stored in the static pool_allocator_base_t, which saves us from
281 // passing the allocator to container constructors.
282
283 template<pool_index_t pool_ix, typename T>
284 class pool_allocator {
285 pool_t *pool;
286 type_t *type = nullptr;
287
288 public:
289 typedef pool_allocator<pool_ix, T> allocator_type;
290 typedef T value_type;
291 typedef value_type *pointer;
292 typedef const value_type * const_pointer;
293 typedef value_type& reference;
294 typedef const value_type& const_reference;
295 typedef std::size_t size_type;
296 typedef std::ptrdiff_t difference_type;
297
298 template<typename U> struct rebind {
299 typedef pool_allocator<pool_ix,U> other;
300 };
301
302 void init(bool force_register) {
303 pool = &get_pool(pool_ix);
304 if (debug_mode || force_register) {
305 type = pool->get_type(typeid(T), sizeof(T));
306 }
307 }
308
309 pool_allocator(bool force_register=false) {
310 init(force_register);
311 }
312 template<typename U>
313 pool_allocator(const pool_allocator<pool_ix,U>&) {
314 init(false);
315 }
316
317 T* allocate(size_t n, void *p = nullptr) {
318 size_t total = sizeof(T) * n;
319 shard_t *shard = pool->pick_a_shard();
320 shard->bytes += total;
321 shard->items += n;
322 if (type) {
323 type->items += n;
324 }
325 T* r = reinterpret_cast<T*>(new char[total]);
326 return r;
327 }
328
329 void deallocate(T* p, size_t n) {
330 size_t total = sizeof(T) * n;
331 shard_t *shard = pool->pick_a_shard();
332 shard->bytes -= total;
333 shard->items -= n;
334 if (type) {
335 type->items -= n;
336 }
337 delete[] reinterpret_cast<char*>(p);
338 }
339
340 T* allocate_aligned(size_t n, size_t align, void *p = nullptr) {
341 size_t total = sizeof(T) * n;
342 shard_t *shard = pool->pick_a_shard();
343 shard->bytes += total;
344 shard->items += n;
345 if (type) {
346 type->items += n;
347 }
348 char *ptr;
349 int rc = ::posix_memalign((void**)(void*)&ptr, align, total);
350 if (rc)
351 throw std::bad_alloc();
352 T* r = reinterpret_cast<T*>(ptr);
353 return r;
354 }
355
356 void deallocate_aligned(T* p, size_t n) {
357 size_t total = sizeof(T) * n;
358 shard_t *shard = pool->pick_a_shard();
359 shard->bytes -= total;
360 shard->items -= n;
361 if (type) {
362 type->items -= n;
363 }
364 ::free(p);
365 }
366
367 void destroy(T* p) {
368 p->~T();
369 }
370
371 template<class U>
372 void destroy(U *p) {
373 p->~U();
374 }
375
376 void construct(T* p, const T& val) {
377 ::new ((void *)p) T(val);
378 }
379
380 template<class U, class... Args> void construct(U* p,Args&&... args) {
381 ::new((void *)p) U(std::forward<Args>(args)...);
382 }
383
384 bool operator==(const pool_allocator&) const { return true; }
385 bool operator!=(const pool_allocator&) const { return false; }
386 };
387
388
389 // Namespace mempool
390
391 #define P(x) \
392 namespace x { \
393 static const mempool::pool_index_t id = mempool::mempool_##x; \
394 template<typename v> \
395 using pool_allocator = mempool::pool_allocator<id,v>; \
396 \
397 using string = std::basic_string<char,std::char_traits<char>, \
398 pool_allocator<char>>; \
399 \
400 template<typename k,typename v, typename cmp = std::less<k> > \
401 using map = std::map<k, v, cmp, \
402 pool_allocator<std::pair<const k,v>>>; \
403 \
404 template<typename k,typename v, typename cmp = std::less<k> > \
405 using compact_map = compact_map<k, v, cmp, \
406 pool_allocator<std::pair<const k,v>>>; \
407 \
408 template<typename k,typename v, typename cmp = std::less<k> > \
409 using compact_multimap = compact_multimap<k, v, cmp, \
410 pool_allocator<std::pair<const k,v>>>; \
411 \
412 template<typename k, typename cmp = std::less<k> > \
413 using compact_set = compact_set<k, cmp, pool_allocator<k>>; \
414 \
415 template<typename k,typename v, typename cmp = std::less<k> > \
416 using multimap = std::multimap<k,v,cmp, \
417 pool_allocator<std::pair<const k, \
418 v>>>; \
419 \
420 template<typename k, typename cmp = std::less<k> > \
421 using set = std::set<k,cmp,pool_allocator<k>>; \
422 \
423 template<typename k, typename cmp = std::less<k> > \
424 using flat_set = boost::container::flat_set<k,cmp,pool_allocator<k>>; \
425 \
426 template<typename k, typename v, typename cmp = std::less<k> > \
427 using flat_map = boost::container::flat_map<k,v,cmp, \
428 pool_allocator<std::pair<k,v>>>; \
429 \
430 template<typename v> \
431 using list = std::list<v,pool_allocator<v>>; \
432 \
433 template<typename v> \
434 using vector = std::vector<v,pool_allocator<v>>; \
435 \
436 template<typename k, typename v, \
437 typename h=std::hash<k>, \
438 typename eq = std::equal_to<k>> \
439 using unordered_map = \
440 std::unordered_map<k,v,h,eq,pool_allocator<std::pair<const k,v>>>;\
441 \
442 inline size_t allocated_bytes() { \
443 return mempool::get_pool(id).allocated_bytes(); \
444 } \
445 inline size_t allocated_items() { \
446 return mempool::get_pool(id).allocated_items(); \
447 } \
448 };
449
450 DEFINE_MEMORY_POOLS_HELPER(P)
451
452 #undef P
453
454 };
455
456 // the elements allocated by mempool is in the same memory space as the ones
457 // allocated by the default allocator. so compare them in an efficient way:
458 // libstdc++'s std::equal is specialized to use memcmp if T is integer or
459 // pointer. this is good enough for our usecase. use
460 // std::is_trivially_copyable<T> to expand the support to more types if
461 // nececssary.
462 template<typename T, mempool::pool_index_t pool_index>
463 bool operator==(const std::vector<T, std::allocator<T>>& lhs,
464 const std::vector<T, mempool::pool_allocator<pool_index, T>>& rhs)
465 {
466 return (lhs.size() == rhs.size() &&
467 std::equal(lhs.begin(), lhs.end(), rhs.begin()));
468 }
469
470 template<typename T, mempool::pool_index_t pool_index>
471 bool operator!=(const std::vector<T, std::allocator<T>>& lhs,
472 const std::vector<T, mempool::pool_allocator<pool_index, T>>& rhs)
473 {
474 return !(lhs == rhs);
475 }
476
477 template<typename T, mempool::pool_index_t pool_index>
478 bool operator==(const std::vector<T, mempool::pool_allocator<pool_index, T>>& lhs,
479 const std::vector<T, std::allocator<T>>& rhs)
480 {
481 return rhs == lhs;
482 }
483
484 template<typename T, mempool::pool_index_t pool_index>
485 bool operator!=(const std::vector<T, mempool::pool_allocator<pool_index, T>>& lhs,
486 const std::vector<T, std::allocator<T>>& rhs)
487 {
488 return !(lhs == rhs);
489 }
490
491 // Use this for any type that is contained by a container (unless it
492 // is a class you defined; see below).
493 #define MEMPOOL_DECLARE_FACTORY(obj, factoryname, pool) \
494 namespace mempool { \
495 namespace pool { \
496 extern pool_allocator<obj> alloc_##factoryname; \
497 } \
498 }
499
500 #define MEMPOOL_DEFINE_FACTORY(obj, factoryname, pool) \
501 namespace mempool { \
502 namespace pool { \
503 pool_allocator<obj> alloc_##factoryname = {true}; \
504 } \
505 }
506
507 // Use this for each class that belongs to a mempool. For example,
508 //
509 // class T {
510 // MEMPOOL_CLASS_HELPERS();
511 // ...
512 // };
513 //
514 #define MEMPOOL_CLASS_HELPERS() \
515 void *operator new(size_t size); \
516 void *operator new[](size_t size) noexcept { \
517 ceph_abort_msg("no array new"); \
518 return nullptr; } \
519 void operator delete(void *); \
520 void operator delete[](void *) { ceph_abort_msg("no array delete"); }
521
522
523 // Use this in some particular .cc file to match each class with a
524 // MEMPOOL_CLASS_HELPERS().
525 #define MEMPOOL_DEFINE_OBJECT_FACTORY(obj,factoryname,pool) \
526 MEMPOOL_DEFINE_FACTORY(obj, factoryname, pool) \
527 void *obj::operator new(size_t size) { \
528 return mempool::pool::alloc_##factoryname.allocate(1); \
529 } \
530 void obj::operator delete(void *p) { \
531 return mempool::pool::alloc_##factoryname.deallocate((obj*)p, 1); \
532 }
533
534 #endif