// ceph/src/common/mempool.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Allen Samuels <allen.samuels@sandisk.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */
15 #include "include/mempool.h"
16 #include "include/demangle.h"
18 // Thread local variables should save index, not &shard[index],
19 // because shard[] is defined in the class
20 static thread_local
size_t thread_shard_index
= mempool::num_shards
;
22 // default to debug_mode off
23 bool mempool::debug_mode
= false;
25 // --------------------------------------------------------------
27 mempool::pool_t
& mempool::get_pool(mempool::pool_index_t ix
)
29 // We rely on this array being initialized before any invocation of
30 // this function, even if it is called by ctors in other compilation
31 // units that are being initialized before this compilation unit.
32 static mempool::pool_t table
[num_pools
];
36 const char *mempool::get_pool_name(mempool::pool_index_t ix
) {
38 static const char *names
[num_pools
] = {
39 DEFINE_MEMORY_POOLS_HELPER(P
)
45 void mempool::dump(ceph::Formatter
*f
)
48 f
->open_object_section("mempool"); // we need (dummy?) topmost section for
49 // JSON Formatter to print pool names. It omits them otherwise.
50 f
->open_object_section("by_pool");
51 for (size_t i
= 0; i
< num_pools
; ++i
) {
52 const pool_t
&pool
= mempool::get_pool((pool_index_t
)i
);
53 f
->open_object_section(get_pool_name((pool_index_t
)i
));
58 f
->dump_object("total", total
);
62 void mempool::set_debug_mode(bool d
)
67 // --------------------------------------------------------------
70 size_t mempool::pool_t::allocated_bytes() const
73 for (size_t i
= 0; i
< num_shards
; ++i
) {
74 result
+= shard
[i
].bytes
;
77 // we raced with some unbalanced allocations/deallocations
80 return (size_t) result
;
83 size_t mempool::pool_t::allocated_items() const
86 for (size_t i
= 0; i
< num_shards
; ++i
) {
87 result
+= shard
[i
].items
;
90 // we raced with some unbalanced allocations/deallocations
93 return (size_t) result
;
96 void mempool::pool_t::adjust_count(ssize_t items
, ssize_t bytes
)
98 thread_shard_index
= (thread_shard_index
== num_shards
) ? pick_a_shard_int() : thread_shard_index
;
99 shard
[thread_shard_index
].items
+= items
;
100 shard
[thread_shard_index
].bytes
+= bytes
;
103 void mempool::pool_t::get_stats(
105 std::map
<std::string
, stats_t
> *by_type
) const
107 for (size_t i
= 0; i
< num_shards
; ++i
) {
108 total
->items
+= shard
[i
].items
;
109 total
->bytes
+= shard
[i
].bytes
;
112 std::lock_guard
shard_lock(lock
);
113 for (auto &p
: type_map
) {
114 std::string n
= ceph_demangle(p
.second
.type_name
);
115 stats_t
&s
= (*by_type
)[n
];
116 s
.bytes
= p
.second
.items
* p
.second
.item_size
;
117 s
.items
= p
.second
.items
;
122 void mempool::pool_t::dump(ceph::Formatter
*f
, stats_t
*ptotal
) const
125 std::map
<std::string
, stats_t
> by_type
;
126 get_stats(&total
, &by_type
);
131 if (!by_type
.empty()) {
132 f
->open_object_section("by_type");
133 for (auto &i
: by_type
) {
134 f
->open_object_section(i
.first
.c_str());