]> git.proxmox.com Git - ceph.git/blame - ceph/src/mgr/ClusterState.cc
update sources to v12.2.4
[ceph.git] / ceph / src / mgr / ClusterState.cc
CommitLineData
7c673cae
FG
1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2// vim: ts=8 sw=2 smarttab
3/*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2014 John Spray <john.spray@inktank.com>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 */
13
14#include "messages/MMgrDigest.h"
31f18b77 15#include "messages/MMonMgrReport.h"
7c673cae
FG
16#include "messages/MPGStats.h"
17
18#include "mgr/ClusterState.h"
19
20#define dout_context g_ceph_context
21#define dout_subsys ceph_subsys_mgr
22#undef dout_prefix
23#define dout_prefix *_dout << "mgr " << __func__ << " "
24
224ce89b
WB
// Construct the mgr-side cluster state cache.
//
// @param monc_     monitor client used to fetch/report cluster maps (borrowed,
//                  not owned — lifetime managed by the caller)
// @param objecter_ objecter for RADOS operations (borrowed; may be replaced
//                  later via set_objecter())
// @param mgrmap    initial MgrMap snapshot, copied into mgr_map
//
// NOTE(review): pgservice is bound to pg_map by reference here, so it always
// reflects the PGMap maintained by this class — confirm PGMapStatService takes
// a reference in the header.
ClusterState::ClusterState(
  MonClient *monc_,
  Objecter *objecter_,
  const MgrMap& mgrmap)
  : monc(monc_),
    objecter(objecter_),
    lock("ClusterState"),  // named Mutex; guards all mutable state below
    mgr_map(mgrmap),
    pgservice(pg_map)
{}
35
36void ClusterState::set_objecter(Objecter *objecter_)
37{
38 Mutex::Locker l(lock);
39
40 objecter = objecter_;
41}
42
43void ClusterState::set_fsmap(FSMap const &new_fsmap)
44{
45 Mutex::Locker l(lock);
46
47 fsmap = new_fsmap;
48}
49
224ce89b
WB
50void ClusterState::set_mgr_map(MgrMap const &new_mgrmap)
51{
52 Mutex::Locker l(lock);
53 mgr_map = new_mgrmap;
54}
55
56void ClusterState::set_service_map(ServiceMap const &new_service_map)
57{
58 Mutex::Locker l(lock);
59 servicemap = new_service_map;
60}
61
7c673cae
FG
62void ClusterState::load_digest(MMgrDigest *m)
63{
64 health_json = std::move(m->health_json);
65 mon_status_json = std::move(m->mon_status_json);
66}
67
68void ClusterState::ingest_pgstats(MPGStats *stats)
69{
70 Mutex::Locker l(lock);
7c673cae
FG
71
72 const int from = stats->get_orig_source().num();
3a9019d9
FG
73
74 pending_inc.update_stat(from, stats->epoch, std::move(stats->osd_stat));
7c673cae
FG
75
76 for (auto p : stats->pg_stat) {
77 pg_t pgid = p.first;
78 const auto &pg_stats = p.second;
79
80 // In case we're hearing about a PG that according to last
81 // OSDMap update should not exist
31f18b77
FG
82 if (existing_pools.count(pgid.pool()) == 0) {
83 dout(15) << " got " << pgid
84 << " reported at " << pg_stats.reported_epoch << ":"
7c673cae
FG
85 << pg_stats.reported_seq
86 << " state " << pg_state_string(pg_stats.state)
31f18b77 87 << " but pool not in " << existing_pools
7c673cae
FG
88 << dendl;
89 continue;
31f18b77
FG
90 }
91 // In case we already heard about more recent stats from this PG
92 // from another OSD
224ce89b
WB
93 const auto q = pg_map.pg_stat.find(pgid);
94 if (q != pg_map.pg_stat.end() &&
95 q->second.get_version_pair() > pg_stats.get_version_pair()) {
31f18b77 96 dout(15) << " had " << pgid << " from "
224ce89b
WB
97 << q->second.reported_epoch << ":"
98 << q->second.reported_seq << dendl;
7c673cae
FG
99 continue;
100 }
101
102 pending_inc.pg_stat_updates[pgid] = pg_stats;
103 }
31f18b77
FG
104}
105
// Apply the accumulated pending_inc to pg_map and start a fresh increment.
//
// NOTE(review): unlike the setters above, this does not take `lock` itself —
// presumably the caller already holds it (it touches pending_inc and pg_map);
// confirm against call sites.
void ClusterState::update_delta_stats()
{
  pending_inc.stamp = ceph_clock_now();
  pending_inc.version = pg_map.version + 1; // to make apply_incremental happy
  dout(10) << " v" << pending_inc.version << dendl;

  // Debug dumps of the before-state and the increment. The repeated
  // `JSONFormatter jf(true)` declarations are legal because each
  // dout(30) ... dendl pair brackets its own macro-introduced scope.
  dout(30) << " pg_map before:\n";
  JSONFormatter jf(true);
  jf.dump_object("pg_map", pg_map);
  jf.flush(*_dout);
  *_dout << dendl;
  dout(30) << " incremental:\n";
  JSONFormatter jf(true);
  jf.dump_object("pending_inc", pending_inc);
  jf.flush(*_dout);
  *_dout << dendl;

  // Fold the increment in, then reset it so new stats start a new delta.
  pg_map.apply_incremental(g_ceph_context, pending_inc);
  pending_inc = PGMap::Incremental();
}
126
// React to a new OSDMap: reconcile pg_map with it, refresh the set of pools
// used to filter incoming PG stats, mark PGs stale/down as needed, and apply
// the resulting increment.
//
// @param osd_map the newly-published OSDMap
void ClusterState::notify_osdmap(const OSDMap &osd_map)
{
  Mutex::Locker l(lock);

  pending_inc.stamp = ceph_clock_now();
  pending_inc.version = pg_map.version + 1; // to make apply_incremental happy
  dout(10) << " v" << pending_inc.version << dendl;

  // Reconcile pg_map with the new map (pool/PG creations, deletions, etc.).
  PGMapUpdater::check_osd_map(g_ceph_context, osd_map, pg_map, &pending_inc);

  // update our list of pools that exist, so that we can filter pg_map updates
  // in synchrony with this OSDMap.
  existing_pools.clear();
  for (auto& p : osd_map.get_pools()) {
    existing_pools.insert(p.first);
  }

  // brute force this for now (don't bother being clever by only
  // checking osds that went up/down)
  set<int> need_check_down_pg_osds;
  PGMapUpdater::check_down_pgs(osd_map, pg_map, true,
			       need_check_down_pg_osds, &pending_inc);

  // Debug dumps; each dout(30) ... dendl pair is its own macro scope, which
  // is why `jf` can be declared twice.
  dout(30) << " pg_map before:\n";
  JSONFormatter jf(true);
  jf.dump_object("pg_map", pg_map);
  jf.flush(*_dout);
  *_dout << dendl;
  dout(30) << " incremental:\n";
  JSONFormatter jf(true);
  jf.dump_object("pending_inc", pending_inc);
  jf.flush(*_dout);
  *_dout << dendl;

  pg_map.apply_incremental(g_ceph_context, pending_inc);
  pending_inc = PGMap::Incremental();
  // TODO: Complete the separation of PG state handling so
  // that a cut-down set of functionality remains in PGMonitor
  // while the full-blown PGMap lives only here.
}