]> git.proxmox.com Git - ceph.git/blob - ceph/src/mon/PGMap.h
update sources to v12.1.1
[ceph.git] / ceph / src / mon / PGMap.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 /*
16 * Placement Group Map. Placement Groups are logical sets of objects
17 * that are replicated by the same set of devices. pgid=(r,hash(o)&m)
18 * where & is a bit-wise AND and m=2^k-1
19 */
20
21 #ifndef CEPH_PGMAP_H
22 #define CEPH_PGMAP_H
23
24 #include "include/health.h"
25 #include "common/debug.h"
26 #include "common/TextTable.h"
27 #include "osd/osd_types.h"
28 #include "include/mempool.h"
29 #include "mon/health_check.h"
30 #include <sstream>
31 #include "mon/PGStatService.h"
32
33 // FIXME: don't like including this here to get OSDMap::Incremental, maybe
34 // PGMapUpdater needs its own header.
35 #include "osd/OSDMap.h"
36
37 namespace ceph { class Formatter; }
38
39 class PGMapDigest {
40 public:
41 MEMPOOL_CLASS_HELPERS();
42 virtual ~PGMapDigest() {}
43
44 mempool::pgmap::vector<uint64_t> osd_last_seq;
45
46 mutable std::map<int, int64_t> avail_space_by_rule;
47
48 // aggregate state, populated by PGMap child
49 int64_t num_pg = 0, num_osd = 0;
50 int64_t num_pg_active = 0;
51 int64_t num_pg_unknown = 0;
52 mempool::pgmap::unordered_map<int32_t,pool_stat_t> pg_pool_sum;
53 mempool::pgmap::map<int64_t,int64_t> num_pg_by_pool;
54 pool_stat_t pg_sum;
55 osd_stat_t osd_sum;
56 mempool::pgmap::unordered_map<int32_t,int32_t> num_pg_by_state;
57 struct pg_count {
58 int32_t acting = 0;
59 int32_t up = 0;
60 int32_t primary = 0;
61 void encode(bufferlist& bl) const {
62 ::encode(acting, bl);
63 ::encode(up, bl);
64 ::encode(primary, bl);
65 }
66 void decode(bufferlist::iterator& p) {
67 ::decode(acting, p);
68 ::decode(up, p);
69 ::decode(primary, p);
70 }
71 };
72 mempool::pgmap::unordered_map<int32_t,pg_count> num_pg_by_osd;
73
74 // recent deltas, and summation
75 /**
76 * keep track of last deltas for each pool, calculated using
77 * @p pg_pool_sum as baseline.
78 */
79 mempool::pgmap::unordered_map<uint64_t, mempool::pgmap::list< pair<pool_stat_t, utime_t> > > per_pool_sum_deltas;
80 /**
81 * keep track of per-pool timestamp deltas, according to last update on
82 * each pool.
83 */
84 mempool::pgmap::unordered_map<uint64_t, utime_t> per_pool_sum_deltas_stamps;
85 /**
86 * keep track of sum deltas, per-pool, taking into account any previous
87 * deltas existing in @p per_pool_sum_deltas. The utime_t as second member
88 * of the pair is the timestamp refering to the last update (i.e., the first
89 * member of the pair) for a given pool.
90 */
91 mempool::pgmap::unordered_map<uint64_t, pair<pool_stat_t,utime_t> > per_pool_sum_delta;
92
93 pool_stat_t pg_sum_delta;
94 utime_t stamp_delta;
95
96
97 void print_summary(Formatter *f, ostream *out) const;
98 void print_oneline_summary(Formatter *f, ostream *out) const;
99
100 void recovery_summary(Formatter *f, list<string> *psl,
101 const pool_stat_t& delta_sum) const;
102 void overall_recovery_summary(Formatter *f, list<string> *psl) const;
103 void pool_recovery_summary(Formatter *f, list<string> *psl,
104 uint64_t poolid) const;
105 void recovery_rate_summary(Formatter *f, ostream *out,
106 const pool_stat_t& delta_sum,
107 utime_t delta_stamp) const;
108 void overall_recovery_rate_summary(Formatter *f, ostream *out) const;
109 void pool_recovery_rate_summary(Formatter *f, ostream *out,
110 uint64_t poolid) const;
111 /**
112 * Obtain a formatted/plain output for client I/O, source from stats for a
113 * given @p delta_sum pool over a given @p delta_stamp period of time.
114 */
115 void client_io_rate_summary(Formatter *f, ostream *out,
116 const pool_stat_t& delta_sum,
117 utime_t delta_stamp) const;
118 /**
119 * Obtain a formatted/plain output for the overall client I/O, which is
120 * calculated resorting to @p pg_sum_delta and @p stamp_delta.
121 */
122 void overall_client_io_rate_summary(Formatter *f, ostream *out) const;
123 /**
124 * Obtain a formatted/plain output for client I/O over a given pool
125 * with id @p pool_id. We will then obtain pool-specific data
126 * from @p per_pool_sum_delta.
127 */
128 void pool_client_io_rate_summary(Formatter *f, ostream *out,
129 uint64_t poolid) const;
130 /**
131 * Obtain a formatted/plain output for cache tier IO, source from stats for a
132 * given @p delta_sum pool over a given @p delta_stamp period of time.
133 */
134 void cache_io_rate_summary(Formatter *f, ostream *out,
135 const pool_stat_t& delta_sum,
136 utime_t delta_stamp) const;
137 /**
138 * Obtain a formatted/plain output for the overall cache tier IO, which is
139 * calculated resorting to @p pg_sum_delta and @p stamp_delta.
140 */
141 void overall_cache_io_rate_summary(Formatter *f, ostream *out) const;
142 /**
143 * Obtain a formatted/plain output for cache tier IO over a given pool
144 * with id @p pool_id. We will then obtain pool-specific data
145 * from @p per_pool_sum_delta.
146 */
147 void pool_cache_io_rate_summary(Formatter *f, ostream *out,
148 uint64_t poolid) const;
149
150 virtual void dump_pool_stats_full(const OSDMap &osd_map, stringstream *ss,
151 Formatter *f, bool verbose) const;
152 void dump_fs_stats(stringstream *ss, Formatter *f, bool verbose) const;
153 static void dump_object_stat_sum(TextTable &tbl, Formatter *f,
154 const object_stat_sum_t &sum,
155 uint64_t avail,
156 float raw_used_rate,
157 bool verbose, const pg_pool_t *pool);
158
159 size_t get_num_pg_by_osd(int osd) const {
160 auto p = num_pg_by_osd.find(osd);
161 if (p == num_pg_by_osd.end())
162 return 0;
163 else
164 return p->second.acting;
165 }
166 int get_num_primary_pg_by_osd(int osd) const {
167 auto p = num_pg_by_osd.find(osd);
168 if (p == num_pg_by_osd.end())
169 return 0;
170 else
171 return p->second.primary;
172 }
173
174 ceph_statfs get_statfs() const {
175 ceph_statfs statfs;
176 // these are in KB.
177 statfs.kb = osd_sum.kb;
178 statfs.kb_used = osd_sum.kb_used;
179 statfs.kb_avail = osd_sum.kb_avail;
180 statfs.num_objects = pg_sum.stats.sum.num_objects;
181 return statfs;
182 }
183
184 int64_t get_rule_avail(int ruleno) const {
185 auto i = avail_space_by_rule.find(ruleno);
186 if (i != avail_space_by_rule.end())
187 return avail_space_by_rule[ruleno];
188 else
189 return 0;
190 }
191
192 // kill me post-luminous:
193 virtual float get_fallback_full_ratio() const {
194 return .95;
195 }
196
197 uint64_t get_last_osd_stat_seq(int osd) {
198 if (osd < (int)osd_last_seq.size())
199 return osd_last_seq[osd];
200 return 0;
201 }
202
203 void encode(bufferlist& bl, uint64_t features) const;
204 void decode(bufferlist::iterator& p);
205 void dump(Formatter *f) const;
206 static void generate_test_instances(list<PGMapDigest*>& ls);
207 };
208 WRITE_CLASS_ENCODER(PGMapDigest::pg_count);
209 WRITE_CLASS_ENCODER_FEATURES(PGMapDigest);
210
/**
 * PGMap: the monitor's authoritative map of placement-group and OSD stats.
 *
 * Extends PGMapDigest with full per-PG (@p pg_stat) and per-OSD
 * (@p osd_stat) state, plus the machinery to apply Incremental updates
 * and recompute aggregate ("soft") state via calc_stats().
 */
class PGMap : public PGMapDigest {
public:
  MEMPOOL_CLASS_HELPERS();

  // the map
  version_t version;
  epoch_t last_osdmap_epoch;   // last osdmap epoch i applied to the pgmap
  epoch_t last_pg_scan;  // osdmap epoch
  mempool::pgmap::unordered_map<int32_t,osd_stat_t> osd_stat;
  mempool::pgmap::unordered_map<pg_t,pg_stat_t> pg_stat;
  mempool::pgmap::set<int32_t> full_osds;     // for pre-luminous only
  mempool::pgmap::set<int32_t> nearfull_osds; // for pre-luminous only
  float full_ratio;
  float nearfull_ratio;

  // mapping of osd to most recently reported osdmap epoch
  mempool::pgmap::unordered_map<int32_t,epoch_t> osd_epochs;

  /**
   * A delta against an existing PGMap: updated/removed PG stats, updated
   * OSD stats, and new ratio/epoch values.  Applied via
   * PGMap::apply_incremental().
   */
  class Incremental {
  public:
    MEMPOOL_CLASS_HELPERS();
    version_t version;
    mempool::pgmap::map<pg_t,pg_stat_t> pg_stat_updates;
    epoch_t osdmap_epoch;
    epoch_t pg_scan;  // osdmap epoch
    mempool::pgmap::set<pg_t> pg_remove;
    float full_ratio;
    float nearfull_ratio;
    utime_t stamp;

  private:
    // kept private so the update_stat/stat_osd_* helpers can maintain the
    // invariant that osd_epochs and osd_stat_updates stay 1:1
    mempool::pgmap::map<int32_t,osd_stat_t> osd_stat_updates;
    mempool::pgmap::set<int32_t> osd_stat_rm;

    // mapping of osd to most recently reported osdmap epoch.
    // 1:1 with osd_stat_updates.
    mempool::pgmap::map<int32_t,epoch_t> osd_epochs;
  public:

    const mempool::pgmap::map<int32_t, osd_stat_t> &get_osd_stat_updates() const {
      return osd_stat_updates;
    }
    const mempool::pgmap::set<int32_t> &get_osd_stat_rm() const {
      return osd_stat_rm;
    }
    const mempool::pgmap::map<int32_t, epoch_t> &get_osd_epochs() const {
      return osd_epochs;
    }

    // Record new stats (and reporting epoch) for an osd.
    template<typename OsdStat>
    void update_stat(int32_t osd, epoch_t epoch, OsdStat&& stat) {
      osd_stat_updates[osd] = std::forward<OsdStat>(stat);
      osd_epochs[osd] = epoch;
      assert(osd_epochs.size() == osd_stat_updates.size());
    }
    // Zero the stats for an osd that was marked out.
    void stat_osd_out(int32_t osd, epoch_t epoch) {
      // 0 the stats for the osd
      osd_stat_updates[osd] = osd_stat_t();
      // only fill in the epoch if the osd didn't already report this
      // epoch. that way we zero the stat but still preserve a reported
      // new epoch...
      if (!osd_epochs.count(osd))
        osd_epochs[osd] = epoch;
      // ...and maintain our invariant.
      assert(osd_epochs.size() == osd_stat_updates.size());
    }
    // Clear the op queue age histogram for an osd that went down and
    // came back up, preserving its other stats.
    void stat_osd_down_up(int32_t osd, epoch_t epoch, const PGMap& pg_map) {
      // 0 the op_queue_age_hist for this osd
      auto p = osd_stat_updates.find(osd);
      if (p != osd_stat_updates.end()) {
        p->second.op_queue_age_hist.clear();
        return;
      }
      // no pending update for this osd; seed one from the current map
      auto q = pg_map.osd_stat.find(osd);
      if (q != pg_map.osd_stat.end()) {
        osd_stat_t& t = osd_stat_updates[osd] = q->second;
        t.op_queue_age_hist.clear();
        osd_epochs[osd] = epoch;
      }
    }
    // Schedule removal of an osd's stats (and drop any pending update).
    void rm_stat(int32_t osd) {
      osd_stat_rm.insert(osd);
      osd_epochs.erase(osd);
      osd_stat_updates.erase(osd);
    }
    void encode(bufferlist &bl, uint64_t features=-1) const;
    void decode(bufferlist::iterator &bl);
    void dump(Formatter *f) const;
    static void generate_test_instances(list<Incremental*>& o);

    Incremental() : version(0), osdmap_epoch(0), pg_scan(0),
                    full_ratio(0), nearfull_ratio(0) {}
  };


  // aggregate stats (soft state), generated by calc_stats()
  mutable epoch_t min_last_epoch_clean = 0;  // 0 means "not yet computed"
  mempool::pgmap::unordered_map<int,set<pg_t> > pg_by_osd;
  mempool::pgmap::unordered_map<int,int> blocked_by_sum;
  mempool::pgmap::list< pair<pool_stat_t, utime_t> > pg_sum_deltas;

  utime_t stamp;

  void update_global_delta(
    CephContext *cct,
    const utime_t ts, const pool_stat_t& pg_sum_old);
  void update_pool_deltas(
    CephContext *cct,
    const utime_t ts,
    const mempool::pgmap::unordered_map<uint64_t, pool_stat_t>& pg_pool_sum_old);
  void clear_delta();

  // Drop all per-pool aggregate/delta state for a deleted pool.
  void deleted_pool(int64_t pool) {
    pg_pool_sum.erase(pool);
    num_pg_by_pool.erase(pool);
    per_pool_sum_deltas.erase(pool);
    per_pool_sum_deltas_stamps.erase(pool);
    per_pool_sum_delta.erase(pool);
  }

 private:
  void update_delta(
    CephContext *cct,
    const utime_t ts,
    const pool_stat_t& old_pool_sum,
    utime_t *last_ts,
    const pool_stat_t& current_pool_sum,
    pool_stat_t *result_pool_delta,
    utime_t *result_ts_delta,
    mempool::pgmap::list<pair<pool_stat_t,utime_t> > *delta_avg_list);

  void update_one_pool_delta(CephContext *cct,
                             const utime_t ts,
                             const uint64_t pool,
                             const pool_stat_t& old_pool_sum);

  epoch_t calc_min_last_epoch_clean() const;

 public:

  mempool::pgmap::set<pg_t> creating_pgs;
  mempool::pgmap::map<int,map<epoch_t,set<pg_t> > > creating_pgs_by_osd_epoch;

  // Bits that used to be enum StuckPG
  static const int STUCK_INACTIVE = (1<<0);
  static const int STUCK_UNCLEAN = (1<<1);
  static const int STUCK_UNDERSIZED = (1<<2);
  static const int STUCK_DEGRADED = (1<<3);
  static const int STUCK_STALE = (1<<4);

  PGMap()
    : version(0),
      last_osdmap_epoch(0), last_pg_scan(0),
      full_ratio(0), nearfull_ratio(0)
  {}

  // Update the full/nearfull ratios and recompute the (pre-luminous)
  // full/nearfull osd sets if either ratio changed.
  void set_full_ratios(float full, float nearfull) {
    if (full_ratio == full && nearfull_ratio == nearfull)
      return;
    full_ratio = full;
    nearfull_ratio = nearfull;
    redo_full_sets();
  }

  version_t get_version() const {
    return version;
  }
  void set_version(version_t v) {
    version = v;
  }
  epoch_t get_last_osdmap_epoch() const {
    return last_osdmap_epoch;
  }
  void set_last_osdmap_epoch(epoch_t e) {
    last_osdmap_epoch = e;
  }
  epoch_t get_last_pg_scan() const {
    return last_pg_scan;
  }
  void set_last_pg_scan(epoch_t e) {
    last_pg_scan = e;
  }
  utime_t get_stamp() const {
    return stamp;
  }
  void set_stamp(utime_t s) {
    stamp = s;
  }

  // Aggregate stats for @p pool, or a default-constructed pool_stat_t if
  // the pool is unknown.
  pool_stat_t get_pg_pool_sum_stat(int64_t pool) const {
    auto p = pg_pool_sum.find(pool);
    if (p != pg_pool_sum.end())
      return p->second;
    return pool_stat_t();
  }


  void update_pg(pg_t pgid, bufferlist& bl);
  void remove_pg(pg_t pgid);
  void update_osd(int osd, bufferlist& bl);
  void remove_osd(int osd);

  void apply_incremental(CephContext *cct, const Incremental& inc);
  void redo_full_sets();
  void register_nearfull_status(int osd, const osd_stat_t& s);
  void calc_stats();
  void stat_pg_add(const pg_t &pgid, const pg_stat_t &s,
                   bool sameosds=false);
  void stat_pg_sub(const pg_t &pgid, const pg_stat_t &s,
                   bool sameosds=false);
  void stat_pg_update(const pg_t pgid, pg_stat_t &prev, bufferlist::iterator& blp);
  void stat_osd_add(int osd, const osd_stat_t &s);
  void stat_osd_sub(int osd, const osd_stat_t &s);

  void encode(bufferlist &bl, uint64_t features=-1) const;
  void decode(bufferlist::iterator &bl);

  /// encode subset of our data to a PGMapDigest
  void encode_digest(const OSDMap& osdmap,
                     bufferlist& bl, uint64_t features) const;

  void dirty_all(Incremental& inc);

  int64_t get_rule_avail(const OSDMap& osdmap, int ruleno) const;
  void get_rules_avail(const OSDMap& osdmap,
                       std::map<int,int64_t> *avail_map) const;
  void dump(Formatter *f) const;
  void dump_basic(Formatter *f) const;
  void dump_pg_stats(Formatter *f, bool brief) const;
  void dump_pool_stats(Formatter *f) const;
  void dump_osd_stats(Formatter *f) const;
  void dump_delta(Formatter *f) const;
  void dump_filtered_pg_stats(Formatter *f, set<pg_t>& pgs) const;
  // Refresh the cached per-rule avail space, then defer to the digest's
  // implementation for the actual dump.
  void dump_pool_stats_full(const OSDMap &osd_map, stringstream *ss,
                            Formatter *f, bool verbose) const override {
    get_rules_avail(osd_map, &avail_space_by_rule);
    PGMapDigest::dump_pool_stats_full(osd_map, ss, f, verbose);
  }

  void dump_pg_stats_plain(
    ostream& ss,
    const mempool::pgmap::unordered_map<pg_t, pg_stat_t>& pg_stats,
    bool brief) const;
  void get_stuck_stats(
    int types, const utime_t cutoff,
    mempool::pgmap::unordered_map<pg_t, pg_stat_t>& stuck_pgs) const;
  bool get_stuck_counts(const utime_t cutoff, map<string, int>& note) const;
  void dump_stuck(Formatter *f, int types, utime_t cutoff) const;
  void dump_stuck_plain(ostream& ss, int types, utime_t cutoff) const;
  int dump_stuck_pg_stats(stringstream &ds,
                          Formatter *f,
                          int threshold,
                          vector<string>& args) const;
  void dump(ostream& ss) const;
  void dump_basic(ostream& ss) const;
  void dump_pg_stats(ostream& ss, bool brief) const;
  void dump_pg_sum_stats(ostream& ss, bool header) const;
  void dump_pool_stats(ostream& ss, bool header) const;
  void dump_osd_stats(ostream& ss) const;
  void dump_osd_sum_stats(ostream& ss) const;
  void dump_filtered_pg_stats(ostream& ss, set<pg_t>& pgs) const;

  void dump_osd_perf_stats(Formatter *f) const;
  void print_osd_perf_stats(std::ostream *ss) const;

  void dump_osd_blocked_by_stats(Formatter *f) const;
  void print_osd_blocked_by_stats(std::ostream *ss) const;

  void get_filtered_pg_stats(uint32_t state, int64_t poolid, int64_t osdid,
                             bool primary, set<pg_t>& pgs) const;

  // Lazily compute (and cache) the minimum last_epoch_clean.
  epoch_t get_min_last_epoch_clean() const {
    if (!min_last_epoch_clean)
      min_last_epoch_clean = calc_min_last_epoch_clean();
    return min_last_epoch_clean;
  }

  // Prefer our configured full_ratio; fall back to the digest default.
  float get_fallback_full_ratio() const override {
    if (full_ratio > 0) {
      return full_ratio;
    }
    return .95;
  }

  void get_health(CephContext *cct,
                  const OSDMap& osdmap,
                  list<pair<health_status_t,string> >& summary,
                  list<pair<health_status_t,string> > *detail) const;

  void get_health_checks(
    CephContext *cct,
    const OSDMap& osdmap,
    health_check_map_t *checks) const;

  static void generate_test_instances(list<PGMap*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(PGMap::Incremental)
WRITE_CLASS_ENCODER_FEATURES(PGMap)
509
510 inline ostream& operator<<(ostream& out, const PGMapDigest& m) {
511 m.print_oneline_summary(NULL, &out);
512 return out;
513 }
514
/**
 * Handle a monitor "pg ..." command against the given PGMap/OSDMap.
 *
 * @param prefix  command prefix identifying which pg command to run
 * @param cmdmap  parsed command arguments
 * @param f       optional Formatter for structured output (may be NULL)
 * @param ss      human-readable status/error text is appended here
 * @param odata   raw response payload is appended here
 * @return presumably 0 on success, negative error code on failure --
 *         confirm against the definition in PGMap.cc
 */
int process_pg_map_command(
  const string& prefix,
  const map<string,cmd_vartype>& cmdmap,
  const PGMap& pg_map,
  const OSDMap& osdmap,
  Formatter *f,
  stringstream *ss,
  bufferlist *odata);
523
/**
 * PGMapUpdater: stateless helpers that reconcile a PGMap with osdmap
 * changes, accumulating the required changes into a PGMap::Incremental.
 */
class PGMapUpdater
{
public:
  // Apply an osdmap incremental: collect osds whose down state must be
  // rechecked and queue the corresponding pgmap updates.
  static void check_osd_map(
    const OSDMap::Incremental &osd_inc,
    std::set<int> *need_check_down_pg_osds,
    std::map<int,utime_t> *last_osd_report,
    PGMap *pg_map,
    PGMap::Incremental *pending_inc);

  // Reconcile the pgmap against a full osdmap.
  static void check_osd_map(
    CephContext *cct,
    const OSDMap &osdmap,
    const PGMap& pg_map,
    PGMap::Incremental *pending_inc);
  /**
   * check latest osdmap for new pgs to register
   */
  static void register_new_pgs(
    const OSDMap &osd_map,
    const PGMap &pg_map,
    PGMap::Incremental *pending_inc);

  /**
   * recalculate creating pg mappings
   */
  static void update_creating_pgs(
    const OSDMap &osd_map,
    const PGMap &pg_map,
    PGMap::Incremental *pending_inc);

  // Register a single (possibly newly-created) pg in the pending
  // incremental.
  static void register_pg(
    const OSDMap &osd_map,
    pg_t pgid, epoch_t epoch,
    bool new_pool,
    const PGMap &pg_map,
    PGMap::Incremental *pending_inc);

  // mark pg's state stale if its acting primary osd is down
  static void check_down_pgs(
    const OSDMap &osd_map,
    const PGMap &pg_map,
    bool check_all,
    const set<int>& need_check_down_pg_osds,
    PGMap::Incremental *pending_inc);
};
570
namespace reweight {
/* Assign a lower weight to overloaded OSDs.
 *
 * The osds that will get a lower weight are those with a utilization
 * percentage 'oload' percent greater than the average utilization.
 *
 * @param max_changef  maximum weight change per osd -- presumably a
 *                     fraction; confirm against the definition
 * @param max_osds     maximum number of osds to adjust
 * @param by_pg        weight by pg count rather than raw utilization,
 *                     optionally restricted to @p pools
 * @param no_increasing  if true, never raise a weight
 * @param new_weights  out: proposed osd -> new crush weight values
 */
  int by_utilization(const OSDMap &osd_map,
                     const PGMap &pg_map,
                     int oload,
                     double max_changef,
                     int max_osds,
                     bool by_pg, const set<int64_t> *pools,
                     bool no_increasing,
                     mempool::osdmap::map<int32_t, uint32_t>* new_weights,
                     std::stringstream *ss,
                     std::string *out_str,
                     Formatter *f);
}
589
590
591 class PGMapStatService : virtual public PGStatService {
592 protected:
593 const PGMap& pgmap;
594 public:
595 PGMapStatService(const PGMap& o)
596 : pgmap(o) {}
597
598 bool is_readable() const override { return true; }
599
600 const pool_stat_t* get_pool_stat(int poolid) const override {
601 auto i = pgmap.pg_pool_sum.find(poolid);
602 if (i != pgmap.pg_pool_sum.end()) {
603 return &i->second;
604 }
605 return nullptr;
606 }
607
608 const osd_stat_t& get_osd_sum() const override { return pgmap.osd_sum; }
609
610 const osd_stat_t *get_osd_stat(int osd) const override {
611 auto i = pgmap.osd_stat.find(osd);
612 if (i == pgmap.osd_stat.end()) {
613 return nullptr;
614 }
615 return &i->second;
616 }
617 const mempool::pgmap::unordered_map<int32_t,osd_stat_t>& get_osd_stat() const override {
618 return pgmap.osd_stat;
619 }
620 float get_full_ratio() const override { return pgmap.full_ratio; }
621 float get_nearfull_ratio() const override { return pgmap.nearfull_ratio; }
622
623 bool have_creating_pgs() const override {
624 return !pgmap.creating_pgs.empty();
625 }
626 bool is_creating_pg(pg_t pgid) const override {
627 return pgmap.creating_pgs.count(pgid);
628 }
629
630 epoch_t get_min_last_epoch_clean() const override {
631 return pgmap.get_min_last_epoch_clean();
632 }
633
634 bool have_full_osds() const override { return !pgmap.full_osds.empty(); }
635 bool have_nearfull_osds() const override {
636 return !pgmap.nearfull_osds.empty();
637 }
638
639 size_t get_num_pg_by_osd(int osd) const override {
640 return pgmap.get_num_pg_by_osd(osd);
641 }
642 ceph_statfs get_statfs() const override {
643 ceph_statfs statfs;
644 statfs.kb = pgmap.osd_sum.kb;
645 statfs.kb_used = pgmap.osd_sum.kb_used;
646 statfs.kb_avail = pgmap.osd_sum.kb_avail;
647 statfs.num_objects = pgmap.pg_sum.stats.sum.num_objects;
648 return statfs;
649 }
650 void print_summary(Formatter *f, ostream *out) const override {
651 pgmap.print_summary(f, out);
652 }
653 virtual void dump_info(Formatter *f) const override {
654 f->dump_object("pgmap", pgmap);
655 }
656 void dump_fs_stats(stringstream *ss,
657 Formatter *f,
658 bool verbose) const override {
659 pgmap.dump_fs_stats(ss, f, verbose);
660 }
661 void dump_pool_stats(const OSDMap& osdm, stringstream *ss, Formatter *f,
662 bool verbose) const override {
663 pgmap.dump_pool_stats_full(osdm, ss, f, verbose);
664 }
665
666 int process_pg_command(const string& prefix,
667 const map<string,cmd_vartype>& cmdmap,
668 const OSDMap& osdmap,
669 Formatter *f,
670 stringstream *ss,
671 bufferlist *odata) const override {
672 return process_pg_map_command(prefix, cmdmap, pgmap, osdmap, f, ss, odata);
673 }
674 };
675
676
677 #endif