]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | /* | |
4 | * Ceph - scalable distributed file system | |
5 | * | |
6 | * Copyright (C) 2013 Inktank, Inc | |
7 | * | |
8 | * This is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License version 2.1, as published by the Free Software | |
11 | * Foundation. See file COPYING. | |
12 | * | |
13 | */ | |
14 | #include "include/memory.h" | |
15 | #include <errno.h> | |
16 | #include <map> | |
17 | #include <list> | |
18 | #include <string> | |
19 | #include <sstream> | |
20 | ||
21 | #include "acconfig.h" | |
22 | ||
23 | #ifdef HAVE_SYS_VFS_H | |
24 | #include <sys/vfs.h> | |
25 | #endif | |
26 | ||
27 | #ifdef HAVE_SYS_MOUNT_H | |
28 | #include <sys/mount.h> | |
29 | #endif | |
30 | ||
31 | #ifdef HAVE_SYS_PARAM_H | |
32 | #include <sys/param.h> | |
33 | #endif | |
34 | ||
35 | #include "messages/MMonHealth.h" | |
36 | #include "include/assert.h" | |
37 | #include "common/Formatter.h" | |
38 | #include "common/errno.h" | |
39 | ||
40 | #include "mon/Monitor.h" | |
41 | #include "mon/DataHealthService.h" | |
42 | ||
43 | #define dout_subsys ceph_subsys_mon | |
44 | #undef dout_prefix | |
45 | #define dout_prefix _prefix(_dout, mon, this) | |
46 | static ostream& _prefix(std::ostream *_dout, const Monitor *mon, | |
47 | const DataHealthService *svc) { | |
48 | assert(mon != NULL); | |
49 | assert(svc != NULL); | |
50 | return *_dout << "mon." << mon->name << "@" << mon->rank | |
51 | << "(" << mon->get_state_name() << ")." << svc->get_name() | |
52 | << "(" << svc->get_epoch() << ") "; | |
53 | } | |
54 | ||
55 | void DataHealthService::start_epoch() | |
56 | { | |
57 | dout(10) << __func__ << " epoch " << get_epoch() << dendl; | |
58 | // we are not bound by election epochs, but we should clear the stats | |
59 | // everytime an election is triggerd. As far as we know, a monitor might | |
60 | // have been running out of disk space and someone fixed it. We don't want | |
61 | // to hold the cluster back, even confusing the user, due to some possibly | |
62 | // outdated stats. | |
63 | stats.clear(); | |
64 | last_warned_percent = 0; | |
65 | } | |
66 | ||
// Fold every monitor's disk/store statistics into the cluster health report.
//
// @param f        optional Formatter; when non-NULL, a "data_health" object
//                 containing a "mons" array is emitted alongside the summary
// @param summary  receives one (status, "mon.<name> <detail>") entry per
//                 monitor that is not HEALTH_OK
// @param detail   optional; when non-NULL, receives the same entries with
//                 the available-space percentage appended
void DataHealthService::get_health(
    Formatter *f,
    list<pair<health_status_t,string> >& summary,
    list<pair<health_status_t,string> > *detail)
{
  dout(10) << __func__ << dendl;
  if (f) {
    f->open_object_section("data_health");
    f->open_array_section("mons");
  }

  for (map<entity_inst_t,DataStats>::iterator it = stats.begin();
       it != stats.end(); ++it) {
    string mon_name = mon->monmap->get_name(it->first.addr);
    // NOTE: deliberately shadows the 'stats' member for the rest of the loop
    // body; everything below refers to this one monitor's stats.
    DataStats& stats = it->second;

    // Grade free space first: critical threshold beats the warn threshold.
    health_status_t health_status = HEALTH_OK;
    string health_detail;
    if (stats.fs_stats.avail_percent <= g_conf->mon_data_avail_crit) {
      health_status = HEALTH_ERR;
      health_detail = "low disk space, shutdown imminent";
    } else if (stats.fs_stats.avail_percent <= g_conf->mon_data_avail_warn) {
      health_status = HEALTH_WARN;
      health_detail = "low disk space";
    }

    // An oversized store is at most a warning; the '>' comparison relies on
    // health_status_t ordering ERR below WARN below OK numerically, so this
    // downgrades an OK to WARN but never masks an existing ERR.
    if (stats.store_stats.bytes_total >= g_conf->mon_data_size_warn) {
      if (health_status > HEALTH_WARN)
        health_status = HEALTH_WARN;
      if (!health_detail.empty())
        health_detail.append("; ");
      stringstream ss;
      ss << "store is getting too big! "
         << prettybyte_t(stats.store_stats.bytes_total)
         << " >= " << prettybyte_t(g_conf->mon_data_size_warn);
      health_detail.append(ss.str());
    }

    // Unhealthy monitors contribute to the summary; the detail list (if
    // requested) gets the same message with the avail percentage appended.
    if (health_status != HEALTH_OK) {
      stringstream ss;
      ss << "mon." << mon_name << " " << health_detail;
      summary.push_back(make_pair(health_status, ss.str()));
      ss << " -- " << stats.fs_stats.avail_percent << "% avail";
      if (detail)
        detail->push_back(make_pair(health_status, ss.str()));
    }

    if (f) {
      f->open_object_section("mon");
      f->dump_string("name", mon_name.c_str());
      // leave this unenclosed by an object section to avoid breaking backward-compatibility
      stats.dump(f);
      f->dump_stream("health") << health_status;
      if (health_status != HEALTH_OK)
        f->dump_string("health_detail", health_detail);
      f->close_section();
    }
  }

  if (f) {
    f->close_section(); // mons
    f->close_section(); // data_health
  }
}
131 | ||
132 | int DataHealthService::update_store_stats(DataStats &ours) | |
133 | { | |
134 | map<string,uint64_t> extra; | |
135 | uint64_t store_size = mon->store->get_estimated_size(extra); | |
136 | assert(store_size > 0); | |
137 | ||
138 | ours.store_stats.bytes_total = store_size; | |
139 | ours.store_stats.bytes_sst = extra["sst"]; | |
140 | ours.store_stats.bytes_log = extra["log"]; | |
141 | ours.store_stats.bytes_misc = extra["misc"]; | |
142 | ours.last_update = ceph_clock_now(); | |
143 | ||
144 | return 0; | |
145 | } | |
146 | ||
147 | ||
148 | int DataHealthService::update_stats() | |
149 | { | |
150 | entity_inst_t our_inst = mon->messenger->get_myinst(); | |
151 | DataStats& ours = stats[our_inst]; | |
152 | ||
153 | int err = get_fs_stats(ours.fs_stats, g_conf->mon_data.c_str()); | |
154 | if (err < 0) { | |
155 | derr << __func__ << " get_fs_stats error: " << cpp_strerror(err) << dendl; | |
156 | return err; | |
157 | } | |
158 | dout(0) << __func__ << " avail " << ours.fs_stats.avail_percent << "%" | |
159 | << " total " << prettybyte_t(ours.fs_stats.byte_total) | |
160 | << ", used " << prettybyte_t(ours.fs_stats.byte_used) | |
161 | << ", avail " << prettybyte_t(ours.fs_stats.byte_avail) << dendl; | |
162 | ours.last_update = ceph_clock_now(); | |
163 | ||
164 | return update_store_stats(ours); | |
165 | } | |
166 | ||
167 | void DataHealthService::share_stats() | |
168 | { | |
169 | dout(10) << __func__ << dendl; | |
170 | if (!in_quorum()) | |
171 | return; | |
172 | ||
173 | assert(!stats.empty()); | |
174 | entity_inst_t our_inst = mon->messenger->get_myinst(); | |
175 | assert(stats.count(our_inst) > 0); | |
176 | DataStats &ours = stats[our_inst]; | |
177 | const set<int>& quorum = mon->get_quorum(); | |
178 | for (set<int>::const_iterator it = quorum.begin(); | |
179 | it != quorum.end(); ++it) { | |
180 | if (mon->monmap->get_name(*it) == mon->name) | |
181 | continue; | |
182 | entity_inst_t inst = mon->monmap->get_inst(*it); | |
183 | MMonHealth *m = new MMonHealth(HealthService::SERVICE_HEALTH_DATA, | |
184 | MMonHealth::OP_TELL); | |
185 | m->data_stats = ours; | |
186 | dout(20) << __func__ << " send " << *m << " to " << inst << dendl; | |
187 | mon->messenger->send_message(m, inst); | |
188 | } | |
189 | } | |
190 | ||
// Periodic tick: refresh our stats, share them with the quorum, and react
// to low local disk space (warn, or shut down when critical).
void DataHealthService::service_tick()
{
  dout(10) << __func__ << dendl;

  // If we cannot even measure our disk usage, we cannot guarantee safe
  // operation -- treat it as fatal.
  int err = update_stats();
  if (err < 0) {
    derr << "something went wrong obtaining our disk stats: "
         << cpp_strerror(err) << dendl;
    force_shutdown();
    return;
  }
  if (in_quorum())
    share_stats();

  // update_stats() created/updated our entry above, so this lookup is safe.
  DataStats &ours = stats[mon->messenger->get_myinst()];

  if (ours.fs_stats.avail_percent <= g_conf->mon_data_avail_crit) {
    derr << "reached critical levels of available space on local monitor storage"
         << " -- shutdown!" << dendl;
    force_shutdown();
    return;
  }

  // we must backoff these warnings, and track how much data is being
  // consumed in-between reports to assess if it's worth to log this info,
  // otherwise we may very well contribute to the consumption of the
  // already low available disk space.
  if (ours.fs_stats.avail_percent <= g_conf->mon_data_avail_warn) {
    // Only emit the clog warning when the percentage has changed since the
    // last warning; last_warned_percent is refreshed unconditionally so the
    // next tick compares against the most recent value.
    if (ours.fs_stats.avail_percent != last_warned_percent)
      mon->clog->warn()
        << "reached concerning levels of available space on local monitor storage"
        << " (" << ours.fs_stats.avail_percent << "% free)";
    last_warned_percent = ours.fs_stats.avail_percent;
  } else {
    // Back above the warn threshold: reset so a future dip warns again.
    last_warned_percent = 0;
  }
}
228 | ||
229 | void DataHealthService::handle_tell(MonOpRequestRef op) | |
230 | { | |
231 | op->mark_event("datahealth:handle_tell"); | |
232 | MMonHealth *m = static_cast<MMonHealth*>(op->get_req()); | |
233 | dout(10) << __func__ << " " << *m << dendl; | |
234 | assert(m->get_service_op() == MMonHealth::OP_TELL); | |
235 | ||
236 | stats[m->get_source_inst()] = m->data_stats; | |
237 | } | |
238 | ||
239 | bool DataHealthService::service_dispatch_op(MonOpRequestRef op) | |
240 | { | |
241 | op->mark_event("datahealth:service_dispatch_op"); | |
242 | MMonHealth *m = static_cast<MMonHealth*>(op->get_req()); | |
243 | dout(10) << __func__ << " " << *m << dendl; | |
244 | assert(m->get_service_type() == get_type()); | |
245 | if (!in_quorum()) { | |
246 | dout(1) << __func__ << " not in quorum -- drop message" << dendl; | |
247 | return false; | |
248 | } | |
249 | ||
250 | switch (m->service_op) { | |
251 | case MMonHealth::OP_TELL: | |
252 | // someone is telling us their stats | |
253 | handle_tell(op); | |
254 | break; | |
255 | default: | |
256 | dout(0) << __func__ << " unknown op " << m->service_op << dendl; | |
257 | assert(0 == "Unknown service op"); | |
258 | break; | |
259 | } | |
260 | return true; | |
261 | } |