]> git.proxmox.com Git - ceph.git/blob - ceph/src/crimson/osd/pg_map.h
add stop-gap to fix compat with CPUs not supporting SSE 4.1
[ceph.git] / ceph / src / crimson / osd / pg_map.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #pragma once
5
#include <algorithm>
#include <functional>
#include <map>

#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>

#include "include/types.h"
#include "crimson/common/type_helpers.h"
#include "crimson/common/smp_helpers.h"
#include "crimson/osd/osd_operation.h"
#include "osd/osd_types.h"
17
18 namespace crimson::osd {
19 class PG;
20
21 /**
22 * PGShardMapping
23 *
24 * Maps pgs to shards.
25 */
26 class PGShardMapping {
27 public:
28 /// Returns mapping if present, NULL_CORE otherwise
29 core_id_t get_pg_mapping(spg_t pgid) {
30 auto iter = pg_to_core.find(pgid);
31 ceph_assert_always(iter == pg_to_core.end() || iter->second != NULL_CORE);
32 return iter == pg_to_core.end() ? NULL_CORE : iter->second;
33 }
34
35 /// Returns mapping for pgid, creates new one if it doesn't already exist
36 core_id_t maybe_create_pg(spg_t pgid, core_id_t core = NULL_CORE) {
37 auto [insert_iter, inserted] = pg_to_core.emplace(pgid, core);
38 if (!inserted) {
39 ceph_assert_always(insert_iter->second != NULL_CORE);
40 if (core != NULL_CORE) {
41 ceph_assert_always(insert_iter->second == core);
42 }
43 return insert_iter->second;
44 } else {
45 ceph_assert_always(core_to_num_pgs.size() > 0);
46 std::map<core_id_t, unsigned>::iterator core_iter;
47 if (core == NULL_CORE) {
48 core_iter = std::min_element(
49 core_to_num_pgs.begin(),
50 core_to_num_pgs.end(),
51 [](const auto &left, const auto &right) {
52 return left.second < right.second;
53 });
54 } else {
55 core_iter = core_to_num_pgs.find(core);
56 }
57 ceph_assert_always(core_to_num_pgs.end() != core_iter);
58 insert_iter->second = core_iter->first;
59 core_iter->second++;
60 return insert_iter->second;
61 }
62 }
63
64 /// Remove pgid
65 void remove_pg(spg_t pgid) {
66 auto iter = pg_to_core.find(pgid);
67 ceph_assert_always(iter != pg_to_core.end());
68 ceph_assert_always(iter->second != NULL_CORE);
69 auto count_iter = core_to_num_pgs.find(iter->second);
70 ceph_assert_always(count_iter != core_to_num_pgs.end());
71 ceph_assert_always(count_iter->second > 0);
72 --(count_iter->second);
73 pg_to_core.erase(iter);
74 }
75
76 size_t get_num_pgs() const { return pg_to_core.size(); }
77
78 /// Map to cores in [min_core_mapping, core_mapping_limit)
79 PGShardMapping(core_id_t min_core_mapping, core_id_t core_mapping_limit) {
80 ceph_assert_always(min_core_mapping < core_mapping_limit);
81 for (auto i = min_core_mapping; i != core_mapping_limit; ++i) {
82 core_to_num_pgs.emplace(i, 0);
83 }
84 }
85
86 template <typename F>
87 void for_each_pgid(F &&f) const {
88 for (const auto &i: pg_to_core) {
89 std::invoke(f, i.first);
90 }
91 }
92
93 private:
94 std::map<core_id_t, unsigned> core_to_num_pgs;
95 std::map<spg_t, core_id_t> pg_to_core;
96 };
97
98 /**
99 * PGMap
100 *
101 * Maps spg_t to PG instance within a shard. Handles dealing with waiting
102 * on pg creation.
103 */
class PGMap {
  /**
   * PGCreationState
   *
   * Blocker recording an in-flight creation (or load) of a single pg so
   * that operations targeting it can wait on `promise` until it exists.
   */
  struct PGCreationState : BlockerT<PGCreationState> {
    static constexpr const char * type_name = "PGCreation";

    void dump_detail(Formatter *f) const final;

    // pg this creation state tracks
    spg_t pgid;
    // shared so multiple waiters can obtain futures for the same pg;
    // presumably resolved by pg_created()/pg_loaded() — definitions are
    // out of view, confirm in the .cc
    seastar::shared_promise<Ref<PG>> promise;
    // false until creation has actually been initiated (see set_creating)
    bool creating = false;
    PGCreationState(spg_t pgid);

    // Non-copyable and non-movable: waiters hold futures tied to this
    // object's promise, so its identity must remain stable.
    PGCreationState(const PGCreationState &) = delete;
    PGCreationState(PGCreationState &&) = delete;
    PGCreationState &operator=(const PGCreationState &) = delete;
    PGCreationState &operator=(PGCreationState &&) = delete;

    ~PGCreationState();
  };

  // pgs that are not yet available; entries move conceptually to `pgs`
  // once creation/load completes
  std::map<spg_t, PGCreationState> pgs_creating;
  using pgs_t = std::map<spg_t, Ref<PG>>;
  // fully created/loaded pgs on this shard
  pgs_t pgs;

public:
  using PGCreationBlocker = PGCreationState;
  using PGCreationBlockingEvent = PGCreationBlocker::BlockingEvent;
  /**
   * Get future for pg with a bool indicating whether it's already being
   * created.
   */
  using wait_for_pg_ertr = crimson::errorator<
    crimson::ct_error::ecanceled>;
  using wait_for_pg_fut = wait_for_pg_ertr::future<Ref<PG>>;
  // future resolving to the pg (or ecanceled), paired with whether
  // creation was already in progress
  using wait_for_pg_ret = std::pair<wait_for_pg_fut, bool>;
  wait_for_pg_ret wait_for_pg(PGCreationBlockingEvent::TriggerI&&, spg_t pgid);

  /**
   * get PG in non-blocking manner
   */
  Ref<PG> get_pg(spg_t pgid);

  /**
   * Set creating
   */
  void set_creating(spg_t pgid);

  /**
   * Set newly created pg
   */
  void pg_created(spg_t pgid, Ref<PG> pg);

  /**
   * Add newly loaded pg
   */
  void pg_loaded(spg_t pgid, Ref<PG> pg);

  /**
   * Cancel pending creation of pgid.
   */
  void pg_creation_canceled(spg_t pgid);

  void remove_pg(spg_t pgid);

  // direct access to the shard-local pg table
  pgs_t& get_pgs() { return pgs; }
  const pgs_t& get_pgs() const { return pgs; }
  auto get_pg_count() const { return pgs.size(); }
  PGMap() = default;
  ~PGMap();
};
173
174 }