// Source: git.proxmox.com Git mirror — ceph.git: ceph/src/osd/OSDMapMapping.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include "OSDMapMapping.h"
7 #define dout_subsys ceph_subsys_mon
9 #include "common/debug.h"
// Register OSDMapMapping with Ceph's mempool accounting so allocations of
// this type are tracked under the "osdmapmapping" pool (macro supplied by
// the mempool framework pulled in via OSDMapMapping.h).
// NOTE(review): the trailing argument and closing ");" of this macro
// invocation were lost by the web-scrape extraction — confirm against the
// upstream file before compiling.
11 MEMPOOL_DEFINE_OBJECT_FACTORY(OSDMapMapping
, osdmapmapping
,
14 // ensure that we have a PoolMappings for each pool and that
15 // the dimensions (pg_num and size) match up.
//
// NOTE(review): this chunk is a lossy web-scrape — the embedded upstream
// line numbers jump (23 -> 26, 28 -> 37, 39 -> 41), so the function braces,
// the while-loop body, and several branch bodies are missing from the
// visible text.  Code below is byte-identical; only comments were added.
16 void OSDMapMapping::_init_mappings(const OSDMap
& osdmap
)
// Walk the cached 'pools' map (ordered by pool id) in lockstep with the
// osdmap's authoritative pool set.
19 auto q
= pools
.begin();
20 for (auto& p
: osdmap
.get_pools()) {
// Accumulate the total PG count across all pools into num_pgs.
21 num_pgs
+= p
.second
.get_pg_num();
22 // drop unneeded pools
// Advance q past cached pools whose ids no longer appear in the osdmap.
// (The erase/advance statements of this loop body are not visible in this
// extraction.)
23 while (q
!= pools
.end() && q
->first
< p
.first
) {
// A cached entry exists for this pool id; it is reusable only if both its
// pg_num and its size still match the osdmap's current pool definition.
26 if (q
!= pools
.end() && q
->first
== p
.first
) {
27 if (q
->second
.pg_num
!= p
.second
.get_pg_num() ||
28 q
->second
.size
!= p
.second
.get_size()) {
// (Re)create the PoolMapping sized from the osdmap's pool definition
// (size, pg_num, erasure-coded flag).
37 pools
.emplace(p
.first
, PoolMapping(p
.second
.get_size(),
38 p
.second
.get_pg_num(),
39 p
.second
.is_erasure()));
// Drop any trailing cached entries for pools past the last osdmap pool.
41 pools
.erase(q
, pools
.end());
// Invariant: after the walk, the cache mirrors the osdmap pool set 1:1.
42 ceph_assert(pools
.size() == osdmap
.get_pools().size());
// Recompute the full forward mapping: one _update_range() pass per pool,
// covering every PG ([0, pg_num)) in that pool.
// NOTE(review): upstream line numbers jump (45 -> 48, 49 -> 52), so the
// function braces and any calls made before/after this loop (presumably
// _init_mappings()/_build_rmap()/_finish() — confirm upstream) are not
// visible in this extraction.
45 void OSDMapMapping::update(const OSDMap
& osdmap
)
48 for (auto& p
: osdmap
.get_pools()) {
49 _update_range(osdmap
, p
.first
, 0, p
.second
.get_pg_num());
52 //_dump();  // for debugging
55 void OSDMapMapping::update(const OSDMap
& osdmap
, pg_t pgid
)
57 _update_range(osdmap
, pgid
.pool(), pgid
.ps(), pgid
.ps() + 1);
// Build the reverse map acting_rmap: for each OSD id, the list of PGs in
// whose acting set that OSD appears (derived from the per-pool mapping
// tables built by _update_range()).
// NOTE(review): upstream lines 65-66, 68-69, 73, 78-79 and 82+ are missing
// from this extraction (loop/branch closings, the per-OSD clear of the
// resized rmap, and presumably a pgid.set_ps(ps) before the row scan —
// confirm against upstream before relying on this text).
60 void OSDMapMapping::_build_rmap(const OSDMap
& osdmap
)
// One bucket per possible OSD id.
62 acting_rmap
.resize(osdmap
.get_max_osd());
63 //up_rmap.resize(osdmap.get_max_osd());
64 for (auto& v
: acting_rmap
) {
67 //for (auto& v : up_rmap) {
70 for (auto& p
: pools
) {
// pgid carries the pool id; the ps field is set per iteration below.
71 pg_t
pgid(0, p
.first
);
72 for (unsigned ps
= 0; ps
< p
.second
.pg_num
; ++ps
) {
// Each PG occupies one row of row_size() int32 slots in the flat table.
74 int32_t *row
= &p
.second
.table
[p
.second
.row_size() * ps
];
// From the usage here, row[2] holds the acting-set length and the acting
// OSD ids start at row[4] — inferred; confirm against PoolMapping's layout.
75 for (int i
= 0; i
< row
[2]; ++i
) {
// Skip holes in the acting set (CRUSH_ITEM_NONE = no OSD mapped).
76 if (row
[4 + i
] != CRUSH_ITEM_NONE
) {
77 acting_rmap
[row
[4 + i
]].push_back(pgid
);
80 //for (int i = 0; i < row[3]; ++i) {
81 //up_rmap[row[4 + p.second.size + i]].push_back(pgid);
// Record the epoch of the osdmap this mapping was computed from, marking
// the mapping as complete for that epoch.
// NOTE(review): upstream lines 88-89 and 91+ are not visible in this
// extraction (braces, and possibly further statements — confirm upstream).
87 void OSDMapMapping::_finish(const OSDMap
& osdmap
)
90 epoch
= osdmap
.get_epoch();
// Debug helper: print every pool's raw mapping table to stdout, one
// row_size()-wide table row per output line.  Only referenced from the
// commented-out "//_dump()" call in update().
// NOTE(review): upstream lines 100+ (the newline emitted when the row-end
// condition below holds, and the closing braces) are missing from this
// extraction.
93 void OSDMapMapping::_dump()
95 for (auto& p
: pools
) {
96 cout
<< "pool " << p
.first
<< std::endl
;
// Walk the flat int32 table; entries are grouped into rows of row_size().
97 for (unsigned i
= 0; i
< p
.second
.table
.size(); ++i
) {
98 cout
<< " " << p
.second
.table
[i
];
// True on the last column of each logical row.
99 if (i
% p
.second
.row_size() == p
.second
.row_size() - 1)
// Recompute up/acting mappings for PGs [pg_begin, pg_end) of one pool and
// store them into that pool's PoolMapping rows.
// NOTE(review): upstream lines 107-110 (the remaining parameters — the
// statements below use 'pool', 'pg_begin' and 'pg_end') and line 119 (the
// first argument of pg_to_up_acting_osds, presumably the pg_t being
// queried) are missing from this extraction — confirm upstream.
105 void OSDMapMapping::_update_range(
106 const OSDMap
& osdmap
,
// The pool must already have a PoolMapping (created by _init_mappings()).
111 auto i
= pools
.find(pool
);
112 ceph_assert(i
!= pools
.end());
// Validate the requested range against the pool's pg_num.
113 ceph_assert(pg_begin
<= pg_end
);
114 ceph_assert(pg_end
<= i
->second
.pg_num
);
115 for (unsigned ps
= pg_begin
; ps
< pg_end
; ++ps
) {
// Per-PG outputs filled in by the osdmap calculation below.
116 vector
<int> up
, acting
;
117 int up_primary
, acting_primary
;
118 osdmap
.pg_to_up_acting_osds(
120 &up
, &up_primary
, &acting
, &acting_primary
);
// Move the computed sets into this PG's row of the mapping table.
121 i
->second
.set(ps
, std::move(up
), up_primary
,
122 std::move(acting
), acting_primary
);
126 // ---------------------------
// Account one completed work item for this Job; under the job lock it
// timestamps completion and (per the 'fin' local) appears to arrange for a
// completion Context to run — the decrement/notify logic itself is not
// visible here.
// NOTE(review): upstream lines 131, 133-134 and 136-147 are missing from
// this extraction (the brace structure, the counter decrement, and whatever
// assigns/invokes 'fin') — confirm against upstream before relying on this.
128 void ParallelPGMapper::Job::finish_one()
130 Context
*fin
= nullptr;
// Scoped lock: released at end of the guarded region (RAII).
132 std::lock_guard
l(lock
);
// Record the wall-clock completion time.
135 finish
= ceph_clock_now();
// Worker-thread entry: process one queued Item by mapping its PG range
// [begin, end) for its pool via the owning Job, then mark the item done.
// The TPHandle 'h' is accepted but unused in the visible statements.
// NOTE(review): upstream line 154+ is cut from this extraction — there may
// be a trailing statement (e.g. deleting the heap-allocated Item queued by
// ParallelPGMapper::queue()) after finish_one(); confirm upstream.
148 void ParallelPGMapper::WQ::_process(Item
*i
, ThreadPool::TPHandle
&h
)
// Trace the item being processed at debug level 20.
150 ldout(m
->cct
, 20) << __func__
<< " " << i
->job
<< " " << i
->pool
151 << " [" << i
->begin
<< "," << i
->end
<< ")" << dendl
;
// Delegate the actual mapping work to the Job.
152 i
->job
->process(i
->pool
, i
->begin
, i
->end
);
// Account completion of this shard with the Job.
153 i
->job
->finish_one();
// Shard a Job's osdmap into work items of at most pgs_per_item PGs each and
// enqueue them on the work queue for parallel processing.
// NOTE(review): upstream lines 158 (the Job* parameter — the body uses
// 'job'), 160-161, 165, and everything after line 168 are missing from this
// extraction (including how the per-job item count is initialized) —
// confirm against upstream.
157 void ParallelPGMapper::queue(
159 unsigned pgs_per_item
)
162 for (auto& p
: job
->osdmap
->get_pools()) {
// Step through each pool's PG range in pgs_per_item-sized strides.
163 for (unsigned ps
= 0; ps
< p
.second
.get_pg_num(); ps
+= pgs_per_item
) {
// Clamp the final shard to the pool's pg_num.
164 unsigned ps_end
= std::min(ps
+ pgs_per_item
, p
.second
.get_pg_num());
// Item is heap-allocated; the work queue / worker takes ownership.
166 wq
.queue(new Item(job
, p
.first
, ps
, ps_end
));
// Trace each queued shard at debug level 20.
167 ldout(cct
, 20) << __func__
<< " " << job
<< " " << p
.first
<< " [" << ps
168 << "," << ps_end
<< ")" << dendl
;