]> git.proxmox.com Git - ceph.git/blob - ceph/src/mon/PGMap.h
6d58e6b2546d75001a302d74711f555c12e1ac2b
[ceph.git] / ceph / src / mon / PGMap.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 /*
16 * Placement Group Map. Placement Groups are logical sets of objects
17 * that are replicated by the same set of devices. pgid=(r,hash(o)&m)
18 * where & is a bit-wise AND and m=2^k-1
19 */
20
21 #ifndef CEPH_PGMAP_H
22 #define CEPH_PGMAP_H
23
24 #include "common/debug.h"
25 #include "common/TextTable.h"
26 #include "osd/osd_types.h"
27 #include "include/mempool.h"
28 #include <sstream>
29 #include "mon/PGStatService.h"
30
31 // FIXME: don't like including this here to get OSDMap::Incremental, maybe
32 // PGMapUpdater needs its own header.
33 #include "osd/OSDMap.h"
34
35 namespace ceph { class Formatter; }
36
37 class PGMapDigest {
38 public:
39 MEMPOOL_CLASS_HELPERS();
40 virtual ~PGMapDigest() {}
41
42 mempool::pgmap::vector<uint64_t> osd_last_seq;
43
44 mutable std::map<int, int64_t> avail_space_by_rule;
45
46 // aggregate state, populated by PGMap child
47 int64_t num_pg = 0, num_osd = 0;
48 int64_t num_pg_active = 0;
49 int64_t num_pg_unknown = 0;
50 mempool::pgmap::unordered_map<int32_t,pool_stat_t> pg_pool_sum;
51 mempool::pgmap::map<int64_t,int64_t> num_pg_by_pool;
52 pool_stat_t pg_sum;
53 osd_stat_t osd_sum;
54 mempool::pgmap::unordered_map<int32_t,int32_t> num_pg_by_state;
55 struct pg_count {
56 int32_t acting = 0;
57 int32_t up = 0;
58 int32_t primary = 0;
59 void encode(bufferlist& bl) const {
60 ::encode(acting, bl);
61 ::encode(up, bl);
62 ::encode(primary, bl);
63 }
64 void decode(bufferlist::iterator& p) {
65 ::decode(acting, p);
66 ::decode(up, p);
67 ::decode(primary, p);
68 }
69 };
70 mempool::pgmap::unordered_map<int32_t,pg_count> num_pg_by_osd;
71
72 // recent deltas, and summation
73 /**
74 * keep track of last deltas for each pool, calculated using
75 * @p pg_pool_sum as baseline.
76 */
77 mempool::pgmap::unordered_map<uint64_t, mempool::pgmap::list< pair<pool_stat_t, utime_t> > > per_pool_sum_deltas;
78 /**
79 * keep track of per-pool timestamp deltas, according to last update on
80 * each pool.
81 */
82 mempool::pgmap::unordered_map<uint64_t, utime_t> per_pool_sum_deltas_stamps;
83 /**
84 * keep track of sum deltas, per-pool, taking into account any previous
85 * deltas existing in @p per_pool_sum_deltas. The utime_t as second member
86 * of the pair is the timestamp refering to the last update (i.e., the first
87 * member of the pair) for a given pool.
88 */
89 mempool::pgmap::unordered_map<uint64_t, pair<pool_stat_t,utime_t> > per_pool_sum_delta;
90
91 pool_stat_t pg_sum_delta;
92 utime_t stamp_delta;
93
94
95 void print_summary(Formatter *f, ostream *out) const;
96 void print_oneline_summary(Formatter *f, ostream *out) const;
97
98 void recovery_summary(Formatter *f, list<string> *psl,
99 const pool_stat_t& delta_sum) const;
100 void overall_recovery_summary(Formatter *f, list<string> *psl) const;
101 void pool_recovery_summary(Formatter *f, list<string> *psl,
102 uint64_t poolid) const;
103 void recovery_rate_summary(Formatter *f, ostream *out,
104 const pool_stat_t& delta_sum,
105 utime_t delta_stamp) const;
106 void overall_recovery_rate_summary(Formatter *f, ostream *out) const;
107 void pool_recovery_rate_summary(Formatter *f, ostream *out,
108 uint64_t poolid) const;
109 /**
110 * Obtain a formatted/plain output for client I/O, source from stats for a
111 * given @p delta_sum pool over a given @p delta_stamp period of time.
112 */
113 void client_io_rate_summary(Formatter *f, ostream *out,
114 const pool_stat_t& delta_sum,
115 utime_t delta_stamp) const;
116 /**
117 * Obtain a formatted/plain output for the overall client I/O, which is
118 * calculated resorting to @p pg_sum_delta and @p stamp_delta.
119 */
120 void overall_client_io_rate_summary(Formatter *f, ostream *out) const;
121 /**
122 * Obtain a formatted/plain output for client I/O over a given pool
123 * with id @p pool_id. We will then obtain pool-specific data
124 * from @p per_pool_sum_delta.
125 */
126 void pool_client_io_rate_summary(Formatter *f, ostream *out,
127 uint64_t poolid) const;
128 /**
129 * Obtain a formatted/plain output for cache tier IO, source from stats for a
130 * given @p delta_sum pool over a given @p delta_stamp period of time.
131 */
132 void cache_io_rate_summary(Formatter *f, ostream *out,
133 const pool_stat_t& delta_sum,
134 utime_t delta_stamp) const;
135 /**
136 * Obtain a formatted/plain output for the overall cache tier IO, which is
137 * calculated resorting to @p pg_sum_delta and @p stamp_delta.
138 */
139 void overall_cache_io_rate_summary(Formatter *f, ostream *out) const;
140 /**
141 * Obtain a formatted/plain output for cache tier IO over a given pool
142 * with id @p pool_id. We will then obtain pool-specific data
143 * from @p per_pool_sum_delta.
144 */
145 void pool_cache_io_rate_summary(Formatter *f, ostream *out,
146 uint64_t poolid) const;
147
148 virtual void dump_pool_stats_full(const OSDMap &osd_map, stringstream *ss,
149 Formatter *f, bool verbose) const;
150 void dump_fs_stats(stringstream *ss, Formatter *f, bool verbose) const;
151 static void dump_object_stat_sum(TextTable &tbl, Formatter *f,
152 const object_stat_sum_t &sum,
153 uint64_t avail,
154 float raw_used_rate,
155 bool verbose, const pg_pool_t *pool);
156
157 size_t get_num_pg_by_osd(int osd) const {
158 auto p = num_pg_by_osd.find(osd);
159 if (p == num_pg_by_osd.end())
160 return 0;
161 else
162 return p->second.acting;
163 }
164 int get_num_primary_pg_by_osd(int osd) const {
165 auto p = num_pg_by_osd.find(osd);
166 if (p == num_pg_by_osd.end())
167 return 0;
168 else
169 return p->second.primary;
170 }
171
172 ceph_statfs get_statfs() const {
173 ceph_statfs statfs;
174 // these are in KB.
175 statfs.kb = osd_sum.kb;
176 statfs.kb_used = osd_sum.kb_used;
177 statfs.kb_avail = osd_sum.kb_avail;
178 statfs.num_objects = pg_sum.stats.sum.num_objects;
179 return statfs;
180 }
181
182 int64_t get_rule_avail(int ruleno) const {
183 auto i = avail_space_by_rule.find(ruleno);
184 if (i != avail_space_by_rule.end())
185 return avail_space_by_rule[ruleno];
186 else
187 return 0;
188 }
189
190 // kill me post-luminous:
191 virtual float get_fallback_full_ratio() const {
192 return .95;
193 }
194
195 uint64_t get_last_osd_stat_seq(int osd) {
196 if (osd < (int)osd_last_seq.size())
197 return osd_last_seq[osd];
198 return 0;
199 }
200
201 void encode(bufferlist& bl, uint64_t features) const;
202 void decode(bufferlist::iterator& p);
203 void dump(Formatter *f) const;
204 static void generate_test_instances(list<PGMapDigest*>& ls);
205 };
206 WRITE_CLASS_ENCODER(PGMapDigest::pg_count);
207 WRITE_CLASS_ENCODER_FEATURES(PGMapDigest);
208
/**
 * PGMap - complete placement group statistics map.
 *
 * Extends the PGMapDigest aggregate view with the full per-PG and
 * per-OSD stat tables, plus an Incremental type used to apply batched
 * stat updates.
 */
class PGMap : public PGMapDigest {
public:
  MEMPOOL_CLASS_HELPERS();

  // the map
  version_t version;
  epoch_t last_osdmap_epoch;   // last osdmap epoch i applied to the pgmap
  epoch_t last_pg_scan;  // osdmap epoch
  mempool::pgmap::unordered_map<int32_t,osd_stat_t> osd_stat;
  mempool::pgmap::unordered_map<pg_t,pg_stat_t> pg_stat;
  mempool::pgmap::set<int32_t> full_osds;     // for pre-luminous only
  mempool::pgmap::set<int32_t> nearfull_osds; // for pre-luminous only
  float full_ratio;
  float nearfull_ratio;

  // mapping of osd to most recently reported osdmap epoch
  mempool::pgmap::unordered_map<int32_t,epoch_t> osd_epochs;

  /**
   * Incremental - a batch of pending PGMap updates.
   *
   * Invariant: osd_epochs and osd_stat_updates always hold exactly the
   * same set of osd ids (asserted by the mutators below), which is why
   * those members are private and only mutable through the helpers.
   */
  class Incremental {
  public:
    MEMPOOL_CLASS_HELPERS();
    version_t version;
    mempool::pgmap::map<pg_t,pg_stat_t> pg_stat_updates;
    epoch_t osdmap_epoch;
    epoch_t pg_scan;  // osdmap epoch
    mempool::pgmap::set<pg_t> pg_remove;
    float full_ratio;
    float nearfull_ratio;
    utime_t stamp;

  private:
    mempool::pgmap::map<int32_t,osd_stat_t> osd_stat_updates;
    mempool::pgmap::set<int32_t> osd_stat_rm;

    // mapping of osd to most recently reported osdmap epoch.
    // 1:1 with osd_stat_updates.
    mempool::pgmap::map<int32_t,epoch_t> osd_epochs;
  public:

    const mempool::pgmap::map<int32_t, osd_stat_t> &get_osd_stat_updates() const {
      return osd_stat_updates;
    }
    const mempool::pgmap::set<int32_t> &get_osd_stat_rm() const {
      return osd_stat_rm;
    }
    const mempool::pgmap::map<int32_t, epoch_t> &get_osd_epochs() const {
      return osd_epochs;
    }

    // Record a fresh stat report for an osd along with the osdmap epoch
    // it reported against.
    template<typename OsdStat>
    void update_stat(int32_t osd, epoch_t epoch, OsdStat&& stat) {
      osd_stat_updates[osd] = std::forward<OsdStat>(stat);
      osd_epochs[osd] = epoch;
      assert(osd_epochs.size() == osd_stat_updates.size());
    }
    // Zero an osd's stats when it is marked out.
    void stat_osd_out(int32_t osd, epoch_t epoch) {
      // 0 the stats for the osd
      osd_stat_updates[osd] = osd_stat_t();
      // only fill in the epoch if the osd didn't already report this
      // epoch. that way we zero the stat but still preserve a reported
      // new epoch...
      if (!osd_epochs.count(osd))
        osd_epochs[osd] = epoch;
      // ...and maintain our invariant.
      assert(osd_epochs.size() == osd_stat_updates.size());
    }
    // Clear the op queue age histogram for an osd that bounced
    // (went down and came back up), seeding from pg_map if we have no
    // pending update for it yet.
    void stat_osd_down_up(int32_t osd, epoch_t epoch, const PGMap& pg_map) {
      // 0 the op_queue_age_hist for this osd
      auto p = osd_stat_updates.find(osd);
      if (p != osd_stat_updates.end()) {
        p->second.op_queue_age_hist.clear();
        return;
      }
      auto q = pg_map.osd_stat.find(osd);
      if (q != pg_map.osd_stat.end()) {
        osd_stat_t& t = osd_stat_updates[osd] = q->second;
        t.op_queue_age_hist.clear();
        osd_epochs[osd] = epoch;
      }
    }
    // Schedule removal of an osd's stats, dropping any pending update.
    void rm_stat(int32_t osd) {
      osd_stat_rm.insert(osd);
      osd_epochs.erase(osd);
      osd_stat_updates.erase(osd);
    }
    void encode(bufferlist &bl, uint64_t features=-1) const;
    void decode(bufferlist::iterator &bl);
    void dump(Formatter *f) const;
    static void generate_test_instances(list<Incremental*>& o);

    Incremental() : version(0), osdmap_epoch(0), pg_scan(0),
                    full_ratio(0), nearfull_ratio(0) {}
  };


  // aggregate stats (soft state), generated by calc_stats()
  mutable epoch_t min_last_epoch_clean = 0;  // 0 = not yet computed (lazy)
  mempool::pgmap::unordered_map<int,set<pg_t> > pg_by_osd;
  mempool::pgmap::unordered_map<int,int> blocked_by_sum;
  mempool::pgmap::list< pair<pool_stat_t, utime_t> > pg_sum_deltas;

  utime_t stamp;

  // Recompute the cluster-wide delta against a previous pg_sum snapshot.
  void update_global_delta(
    CephContext *cct,
    const utime_t ts, const pool_stat_t& pg_sum_old);
  // Recompute per-pool deltas against previous per-pool snapshots.
  void update_pool_deltas(
    CephContext *cct,
    const utime_t ts,
    const mempool::pgmap::unordered_map<uint64_t, pool_stat_t>& pg_pool_sum_old);
  void clear_delta();

  // Drop all cached per-pool state for a deleted pool.
  void deleted_pool(int64_t pool) {
    pg_pool_sum.erase(pool);
    num_pg_by_pool.erase(pool);
    per_pool_sum_deltas.erase(pool);
    per_pool_sum_deltas_stamps.erase(pool);
    per_pool_sum_delta.erase(pool);
  }

 private:
  void update_delta(
    CephContext *cct,
    const utime_t ts,
    const pool_stat_t& old_pool_sum,
    utime_t *last_ts,
    const pool_stat_t& current_pool_sum,
    pool_stat_t *result_pool_delta,
    utime_t *result_ts_delta,
    mempool::pgmap::list<pair<pool_stat_t,utime_t> > *delta_avg_list);

  void update_one_pool_delta(CephContext *cct,
                             const utime_t ts,
                             const uint64_t pool,
                             const pool_stat_t& old_pool_sum);

  epoch_t calc_min_last_epoch_clean() const;

 public:

  mempool::pgmap::set<pg_t> creating_pgs;
  mempool::pgmap::map<int,map<epoch_t,set<pg_t> > > creating_pgs_by_osd_epoch;

  // Bits that use to be enum StuckPG
  static const int STUCK_INACTIVE = (1<<0);
  static const int STUCK_UNCLEAN = (1<<1);
  static const int STUCK_UNDERSIZED = (1<<2);
  static const int STUCK_DEGRADED = (1<<3);
  static const int STUCK_STALE = (1<<4);

  PGMap()
    : version(0),
      last_osdmap_epoch(0), last_pg_scan(0),
      full_ratio(0), nearfull_ratio(0)
  {}

  // Update the full/nearfull thresholds and rebuild the full/nearfull
  // osd sets if either ratio changed.
  void set_full_ratios(float full, float nearfull) {
    if (full_ratio == full && nearfull_ratio == nearfull)
      return;
    full_ratio = full;
    nearfull_ratio = nearfull;
    redo_full_sets();
  }

  version_t get_version() const {
    return version;
  }
  void set_version(version_t v) {
    version = v;
  }
  epoch_t get_last_osdmap_epoch() const {
    return last_osdmap_epoch;
  }
  void set_last_osdmap_epoch(epoch_t e) {
    last_osdmap_epoch = e;
  }
  epoch_t get_last_pg_scan() const {
    return last_pg_scan;
  }
  void set_last_pg_scan(epoch_t e) {
    last_pg_scan = e;
  }
  utime_t get_stamp() const {
    return stamp;
  }
  void set_stamp(utime_t s) {
    stamp = s;
  }

  // Return the pool's aggregate stats, or a default-constructed
  // pool_stat_t if the pool is unknown.
  pool_stat_t get_pg_pool_sum_stat(int64_t pool) const {
    auto p = pg_pool_sum.find(pool);
    if (p != pg_pool_sum.end())
      return p->second;
    return pool_stat_t();
  }


  void update_pg(pg_t pgid, bufferlist& bl);
  void remove_pg(pg_t pgid);
  void update_osd(int osd, bufferlist& bl);
  void remove_osd(int osd);

  void apply_incremental(CephContext *cct, const Incremental& inc);
  void redo_full_sets();
  void register_nearfull_status(int osd, const osd_stat_t& s);
  void calc_stats();
  void stat_pg_add(const pg_t &pgid, const pg_stat_t &s,
                   bool sameosds=false);
  void stat_pg_sub(const pg_t &pgid, const pg_stat_t &s,
                   bool sameosds=false);
  void stat_pg_update(const pg_t pgid, pg_stat_t &prev, bufferlist::iterator& blp);
  void stat_osd_add(int osd, const osd_stat_t &s);
  void stat_osd_sub(int osd, const osd_stat_t &s);

  void encode(bufferlist &bl, uint64_t features=-1) const;
  void decode(bufferlist::iterator &bl);

  /// encode subset of our data to a PGMapDigest
  void encode_digest(const OSDMap& osdmap,
                     bufferlist& bl, uint64_t features) const;

  void dirty_all(Incremental& inc);

  int64_t get_rule_avail(const OSDMap& osdmap, int ruleno) const;
  void get_rules_avail(const OSDMap& osdmap,
                       std::map<int,int64_t> *avail_map) const;
  void dump(Formatter *f) const;
  void dump_basic(Formatter *f) const;
  void dump_pg_stats(Formatter *f, bool brief) const;
  void dump_pool_stats(Formatter *f) const;
  void dump_osd_stats(Formatter *f) const;
  void dump_delta(Formatter *f) const;
  void dump_filtered_pg_stats(Formatter *f, set<pg_t>& pgs) const;
  // Refreshes the cached avail_space_by_rule before delegating to the
  // digest implementation.
  void dump_pool_stats_full(const OSDMap &osd_map, stringstream *ss,
                            Formatter *f, bool verbose) const override {
    get_rules_avail(osd_map, &avail_space_by_rule);
    PGMapDigest::dump_pool_stats_full(osd_map, ss, f, verbose);
  }

  void dump_pg_stats_plain(
    ostream& ss,
    const mempool::pgmap::unordered_map<pg_t, pg_stat_t>& pg_stats,
    bool brief) const;
  void get_stuck_stats(
    int types, const utime_t cutoff,
    mempool::pgmap::unordered_map<pg_t, pg_stat_t>& stuck_pgs) const;
  bool get_stuck_counts(const utime_t cutoff, map<string, int>& note) const;
  void dump_stuck(Formatter *f, int types, utime_t cutoff) const;
  void dump_stuck_plain(ostream& ss, int types, utime_t cutoff) const;
  int dump_stuck_pg_stats(stringstream &ds,
                          Formatter *f,
                          int threshold,
                          vector<string>& args) const;
  void dump(ostream& ss) const;
  void dump_basic(ostream& ss) const;
  void dump_pg_stats(ostream& ss, bool brief) const;
  void dump_pg_sum_stats(ostream& ss, bool header) const;
  void dump_pool_stats(ostream& ss, bool header) const;
  void dump_osd_stats(ostream& ss) const;
  void dump_osd_sum_stats(ostream& ss) const;
  void dump_filtered_pg_stats(ostream& ss, set<pg_t>& pgs) const;

  void dump_osd_perf_stats(Formatter *f) const;
  void print_osd_perf_stats(std::ostream *ss) const;

  void dump_osd_blocked_by_stats(Formatter *f) const;
  void print_osd_blocked_by_stats(std::ostream *ss) const;

  void get_filtered_pg_stats(uint32_t state, int64_t poolid, int64_t osdid,
                             bool primary, set<pg_t>& pgs) const;

  // Lazily compute and cache min_last_epoch_clean (0 means "not cached").
  epoch_t get_min_last_epoch_clean() const {
    if (!min_last_epoch_clean)
      min_last_epoch_clean = calc_min_last_epoch_clean();
    return min_last_epoch_clean;
  }

  // Prefer our configured full_ratio; fall back to the digest default.
  float get_fallback_full_ratio() const override {
    if (full_ratio > 0) {
      return full_ratio;
    }
    return .95;
  }

  void get_health(CephContext *cct,
                  const OSDMap& osdmap,
                  list<pair<health_status_t,string> >& summary,
                  list<pair<health_status_t,string> > *detail) const;

  static void generate_test_instances(list<PGMap*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(PGMap::Incremental)
WRITE_CLASS_ENCODER_FEATURES(PGMap)
502
503 inline ostream& operator<<(ostream& out, const PGMapDigest& m) {
504 m.print_oneline_summary(NULL, &out);
505 return out;
506 }
507
// Dispatch a monitor "pg ..." command (identified by @p prefix with
// arguments in @p cmdmap) against the given PGMap/OSDMap; structured
// output goes to *f, text to *ss, raw payload to *odata.
// NOTE(review): presumably returns 0 on success / negative errno on
// failure, per the usual Ceph convention — confirm in the implementation.
int process_pg_map_command(
  const string& prefix,
  const map<string,cmd_vartype>& cmdmap,
  const PGMap& pg_map,
  const OSDMap& osdmap,
  Formatter *f,
  stringstream *ss,
  bufferlist *odata);
516
/**
 * PGMapUpdater - stateless helpers that reconcile a PGMap with an
 * OSDMap, accumulating the resulting changes into a
 * PGMap::Incremental (*pending_inc) rather than mutating the map
 * directly.
 */
class PGMapUpdater
{
public:
  // Apply an incremental osdmap change: collect osds whose PGs may need
  // a down-check into *need_check_down_pg_osds and update
  // *last_osd_report / *pending_inc accordingly.
  static void check_osd_map(
    const OSDMap::Incremental &osd_inc,
    std::set<int> *need_check_down_pg_osds,
    std::map<int,utime_t> *last_osd_report,
    PGMap *pg_map,
    PGMap::Incremental *pending_inc);

  // Reconcile pg_map against a full osdmap, staging changes in
  // *pending_inc.
  static void check_osd_map(
    CephContext *cct,
    const OSDMap &osdmap,
    const PGMap& pg_map,
    PGMap::Incremental *pending_inc);
  /**
   * check latest osdmap for new pgs to register
   */
  static void register_new_pgs(
    const OSDMap &osd_map,
    const PGMap &pg_map,
    PGMap::Incremental *pending_inc);

  /**
   * recalculate creating pg mappings
   */
  static void update_creating_pgs(
    const OSDMap &osd_map,
    const PGMap &pg_map,
    PGMap::Incremental *pending_inc);

  // Register a single pg (optionally for a brand-new pool) in
  // *pending_inc.
  static void register_pg(
    const OSDMap &osd_map,
    pg_t pgid, epoch_t epoch,
    bool new_pool,
    const PGMap &pg_map,
    PGMap::Incremental *pending_inc);

  // mark pg's state stale if its acting primary osd is down
  static void check_down_pgs(
    const OSDMap &osd_map,
    const PGMap &pg_map,
    bool check_all,
    const set<int>& need_check_down_pg_osds,
    PGMap::Incremental *pending_inc);
};
563
namespace reweight {
/* Assign a lower weight to overloaded OSDs.
 *
 * The osds that will get a lower weight are those with a utilization
 * percentage 'oload' percent greater than the average utilization.
 *
 * Proposed new weights are written to *new_weights; human-readable
 * output goes to *ss / *out_str / *f.  Set no_increasing to forbid
 * raising any osd's weight, and by_pg (optionally restricted to
 * *pools) to measure utilization by PG count instead of bytes.
 */
  int by_utilization(const OSDMap &osd_map,
                     const PGMap &pg_map,
                     int oload,
                     double max_changef,
                     int max_osds,
                     bool by_pg, const set<int64_t> *pools,
                     bool no_increasing,
                     mempool::osdmap::map<int32_t, uint32_t>* new_weights,
                     std::stringstream *ss,
                     std::string *out_str,
                     Formatter *f);
}
582
583
584 class PGMapStatService : virtual public PGStatService {
585 protected:
586 const PGMap& pgmap;
587 public:
588 PGMapStatService(const PGMap& o)
589 : pgmap(o) {}
590
591 bool is_readable() const override { return true; }
592
593 const pool_stat_t* get_pool_stat(int poolid) const override {
594 auto i = pgmap.pg_pool_sum.find(poolid);
595 if (i != pgmap.pg_pool_sum.end()) {
596 return &i->second;
597 }
598 return nullptr;
599 }
600
601 const osd_stat_t& get_osd_sum() const override { return pgmap.osd_sum; }
602
603 const osd_stat_t *get_osd_stat(int osd) const override {
604 auto i = pgmap.osd_stat.find(osd);
605 if (i == pgmap.osd_stat.end()) {
606 return nullptr;
607 }
608 return &i->second;
609 }
610 const mempool::pgmap::unordered_map<int32_t,osd_stat_t>& get_osd_stat() const override {
611 return pgmap.osd_stat;
612 }
613 float get_full_ratio() const override { return pgmap.full_ratio; }
614 float get_nearfull_ratio() const override { return pgmap.nearfull_ratio; }
615
616 bool have_creating_pgs() const override {
617 return !pgmap.creating_pgs.empty();
618 }
619 bool is_creating_pg(pg_t pgid) const override {
620 return pgmap.creating_pgs.count(pgid);
621 }
622
623 epoch_t get_min_last_epoch_clean() const override {
624 return pgmap.get_min_last_epoch_clean();
625 }
626
627 bool have_full_osds() const override { return !pgmap.full_osds.empty(); }
628 bool have_nearfull_osds() const override {
629 return !pgmap.nearfull_osds.empty();
630 }
631
632 size_t get_num_pg_by_osd(int osd) const override {
633 return pgmap.get_num_pg_by_osd(osd);
634 }
635 ceph_statfs get_statfs() const override {
636 ceph_statfs statfs;
637 statfs.kb = pgmap.osd_sum.kb;
638 statfs.kb_used = pgmap.osd_sum.kb_used;
639 statfs.kb_avail = pgmap.osd_sum.kb_avail;
640 statfs.num_objects = pgmap.pg_sum.stats.sum.num_objects;
641 return statfs;
642 }
643 void print_summary(Formatter *f, ostream *out) const override {
644 pgmap.print_summary(f, out);
645 }
646 virtual void dump_info(Formatter *f) const override {
647 f->dump_object("pgmap", pgmap);
648 }
649 void dump_fs_stats(stringstream *ss,
650 Formatter *f,
651 bool verbose) const override {
652 pgmap.dump_fs_stats(ss, f, verbose);
653 }
654 void dump_pool_stats(const OSDMap& osdm, stringstream *ss, Formatter *f,
655 bool verbose) const override {
656 pgmap.dump_pool_stats_full(osdm, ss, f, verbose);
657 }
658
659 int process_pg_command(const string& prefix,
660 const map<string,cmd_vartype>& cmdmap,
661 const OSDMap& osdmap,
662 Formatter *f,
663 stringstream *ss,
664 bufferlist *odata) const override {
665 return process_pg_map_command(prefix, cmdmap, pgmap, osdmap, f, ss, odata);
666 }
667 };
668
669
670 #endif