1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
4 | #include "OSDMapMapping.h" | |
5 | #include "OSDMap.h" | |
6 | ||
7 | #define dout_subsys ceph_subsys_mon | |
8 | ||
9 | #include "common/debug.h" | |
10 | ||
11 | MEMPOOL_DEFINE_OBJECT_FACTORY(OSDMapMapping, osdmapmapping, | |
12 | osdmap_mapping); | |
13 | ||
// ensure that we have a PoolMappings for each pool and that
// the dimensions (pg_num and size) match up.
void OSDMapMapping::_init_mappings(const OSDMap& osdmap)
{
  num_pgs = 0;
  auto q = pools.begin();
  // walk the osdmap's pools and our cached PoolMappings in parallel
  // (both are sorted by pool id), reusing entries whose dimensions
  // still match and dropping/recreating the rest.
  for (auto& p : osdmap.get_pools()) {
    num_pgs += p.second.get_pg_num();
    // drop unneeded pools
    while (q != pools.end() && q->first < p.first) {
      q = pools.erase(q);
    }
    if (q != pools.end() && q->first == p.first) {
      if (q->second.pg_num != p.second.get_pg_num() ||
          q->second.size != p.second.get_size()) {
        // pg_num changed
        q = pools.erase(q);
      } else {
        // keep it
        ++q;
        continue;
      }
    }
    // (re)create the mapping for this pool; the new entry sorts just
    // before q, so q remains a valid resume point for the next pool.
    pools.emplace(p.first, PoolMapping(p.second.get_size(),
                                       p.second.get_pg_num()));
  }
  // anything left at/after q no longer exists in the osdmap
  pools.erase(q, pools.end());
  assert(pools.size() == osdmap.get_pools().size());
}
43 | ||
44 | void OSDMapMapping::update(const OSDMap& osdmap) | |
45 | { | |
46 | _start(osdmap); | |
47 | for (auto& p : osdmap.get_pools()) { | |
48 | _update_range(osdmap, p.first, 0, p.second.get_pg_num()); | |
49 | } | |
50 | _finish(osdmap); | |
51 | //_dump(); // for debugging | |
52 | } | |
53 | ||
54 | void OSDMapMapping::update(const OSDMap& osdmap, pg_t pgid) | |
55 | { | |
56 | _update_range(osdmap, pgid.pool(), pgid.ps(), pgid.ps() + 1); | |
57 | } | |
58 | ||
// Rebuild the reverse (osd -> pgs) mapping for the acting sets by
// scanning every pool's forward table.
void OSDMapMapping::_build_rmap(const OSDMap& osdmap)
{
  acting_rmap.resize(osdmap.get_max_osd());
  //up_rmap.resize(osdmap.get_max_osd());
  for (auto& v : acting_rmap) {
    v.resize(0);  // clear each osd's pg list but keep its capacity
  }
  //for (auto& v : up_rmap) {
  // v.resize(0);
  //}
  for (auto& p : pools) {
    pg_t pgid(0, p.first);
    for (unsigned ps = 0; ps < p.second.pg_num; ++ps) {
      pgid.set_ps(ps);
      // NOTE(review): from the indices used here (and in the
      // commented-out up_rmap code) the per-pg row layout appears to
      // be [.., .., acting_count, up_count, acting osds..., up osds...]
      // -- confirm against PoolMapping::set in the header.
      int32_t *row = &p.second.table[p.second.row_size() * ps];
      for (int i = 0; i < row[2]; ++i) {
        // skip holes in the acting set
        if (row[4 + i] != CRUSH_ITEM_NONE) {
          acting_rmap[row[4 + i]].push_back(pgid);
        }
      }
      //for (int i = 0; i < row[3]; ++i) {
      //up_rmap[row[4 + p.second.size + i]].push_back(pgid);
      //}
    }
  }
}
85 | ||
// Finalize an update pass: rebuild the osd->pg reverse map, then
// stamp the mapping with the epoch of the osdmap it now reflects.
void OSDMapMapping::_finish(const OSDMap& osdmap)
{
  _build_rmap(osdmap);
  epoch = osdmap.get_epoch();
}
91 | ||
92 | void OSDMapMapping::_dump() | |
93 | { | |
94 | for (auto& p : pools) { | |
95 | cout << "pool " << p.first << std::endl; | |
96 | for (unsigned i = 0; i < p.second.table.size(); ++i) { | |
97 | cout << " " << p.second.table[i]; | |
98 | if (i % p.second.row_size() == p.second.row_size() - 1) | |
99 | cout << std::endl; | |
100 | } | |
101 | } | |
102 | } | |
103 | ||
104 | void OSDMapMapping::_update_range( | |
105 | const OSDMap& osdmap, | |
106 | int64_t pool, | |
107 | unsigned pg_begin, | |
108 | unsigned pg_end) | |
109 | { | |
110 | auto i = pools.find(pool); | |
111 | assert(i != pools.end()); | |
112 | assert(pg_begin <= pg_end); | |
113 | assert(pg_end <= i->second.pg_num); | |
114 | for (unsigned ps = pg_begin; ps < pg_end; ++ps) { | |
115 | vector<int> up, acting; | |
116 | int up_primary, acting_primary; | |
117 | osdmap.pg_to_up_acting_osds( | |
118 | pg_t(ps, pool), | |
119 | &up, &up_primary, &acting, &acting_primary); | |
120 | i->second.set(ps, std::move(up), up_primary, | |
121 | std::move(acting), acting_primary); | |
122 | } | |
123 | } | |
124 | ||
125 | // --------------------------- | |
126 | ||
// Called by a worker thread when one shard of the job completes.  The
// last shard to finish completes the job and fires the notification
// callback.  The callback is invoked after the lock is released
// (presumably so it can safely re-enter the mapper).
void ParallelPGMapper::Job::finish_one()
{
  Context *fin = nullptr;
  {
    Mutex::Locker l(lock);
    if (--shards == 0) {
      if (!aborted) {
        finish = ceph_clock_now();  // record completion time
        complete();
      }
      cond.Signal();  // notify any thread blocked waiting for the job
      // steal onfinish under the lock so it is invoked exactly once
      fin = onfinish;
      onfinish = nullptr;
    }
  }
  if (fin) {
    fin->complete(0);  // outside the lock, see above
  }
}
146 | ||
147 | void ParallelPGMapper::WQ::_process(Item *i, ThreadPool::TPHandle &h) | |
148 | { | |
149 | ldout(m->cct, 20) << __func__ << " " << i->job << " " << i->pool | |
150 | << " [" << i->begin << "," << i->end << ")" << dendl; | |
151 | i->job->process(i->pool, i->begin, i->end); | |
152 | i->job->finish_one(); | |
153 | delete i; | |
154 | } | |
155 | ||
156 | void ParallelPGMapper::queue( | |
157 | Job *job, | |
158 | unsigned pgs_per_item) | |
159 | { | |
224ce89b | 160 | bool any = false; |
7c673cae FG |
161 | for (auto& p : job->osdmap->get_pools()) { |
162 | for (unsigned ps = 0; ps < p.second.get_pg_num(); ps += pgs_per_item) { | |
163 | unsigned ps_end = MIN(ps + pgs_per_item, p.second.get_pg_num()); | |
164 | job->start_one(); | |
165 | wq.queue(new Item(job, p.first, ps, ps_end)); | |
166 | ldout(cct, 20) << __func__ << " " << job << " " << p.first << " [" << ps | |
167 | << "," << ps_end << ")" << dendl; | |
224ce89b | 168 | any = true; |
7c673cae FG |
169 | } |
170 | } | |
224ce89b | 171 | assert(any); |
7c673cae | 172 | } |