// ceph/src/common/mempool.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Allen Samuels <allen.samuels@sandisk.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#include "include/mempool.h"
#include "include/demangle.h"
19 // default to debug_mode off
20 bool mempool::debug_mode
= false;
22 // --------------------------------------------------------------
24 mempool::pool_t
& mempool::get_pool(mempool::pool_index_t ix
)
26 // We rely on this array being initialized before any invocation of
27 // this function, even if it is called by ctors in other compilation
28 // units that are being initialized before this compilation unit.
29 static mempool::pool_t table
[num_pools
];
33 const char *mempool::get_pool_name(mempool::pool_index_t ix
) {
35 static const char *names
[num_pools
] = {
36 DEFINE_MEMORY_POOLS_HELPER(P
)
42 void mempool::dump(ceph::Formatter
*f
)
45 f
->open_object_section("mempool"); // we need (dummy?) topmost section for
46 // JSON Formatter to print pool names. It omits them otherwise.
47 f
->open_object_section("by_pool");
48 for (size_t i
= 0; i
< num_pools
; ++i
) {
49 const pool_t
&pool
= mempool::get_pool((pool_index_t
)i
);
50 f
->open_object_section(get_pool_name((pool_index_t
)i
));
55 f
->dump_object("total", total
);
59 void mempool::set_debug_mode(bool d
)
64 // --------------------------------------------------------------
67 size_t mempool::pool_t::allocated_bytes() const
70 for (size_t i
= 0; i
< num_shards
; ++i
) {
71 result
+= shard
[i
].bytes
;
74 // we raced with some unbalanced allocations/deallocations
77 return (size_t) result
;
80 size_t mempool::pool_t::allocated_items() const
83 for (size_t i
= 0; i
< num_shards
; ++i
) {
84 result
+= shard
[i
].items
;
87 // we raced with some unbalanced allocations/deallocations
90 return (size_t) result
;
93 void mempool::pool_t::adjust_count(ssize_t items
, ssize_t bytes
)
95 shard_t
*shard
= pick_a_shard();
96 shard
->items
+= items
;
97 shard
->bytes
+= bytes
;
100 void mempool::pool_t::get_stats(
102 std::map
<std::string
, stats_t
> *by_type
) const
104 for (size_t i
= 0; i
< num_shards
; ++i
) {
105 total
->items
+= shard
[i
].items
;
106 total
->bytes
+= shard
[i
].bytes
;
109 std::lock_guard
shard_lock(lock
);
110 for (auto &p
: type_map
) {
111 std::string n
= ceph_demangle(p
.second
.type_name
);
112 stats_t
&s
= (*by_type
)[n
];
113 s
.bytes
= p
.second
.items
* p
.second
.item_size
;
114 s
.items
= p
.second
.items
;
119 void mempool::pool_t::dump(ceph::Formatter
*f
, stats_t
*ptotal
) const
122 std::map
<std::string
, stats_t
> by_type
;
123 get_stats(&total
, &by_type
);
128 if (!by_type
.empty()) {
129 f
->open_object_section("by_type");
130 for (auto &i
: by_type
) {
131 f
->open_object_section(i
.first
.c_str());