]>
Commit | Line | Data |
---|---|---|
9f95a23c TL |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
9f95a23c TL |
4 | #include <seastar/core/future.hh> |
5 | ||
6 | #include "include/types.h" | |
7 | #include "common/Formatter.h" | |
8 | #include "crimson/osd/pg.h" | |
1e59de90 TL |
9 | #include "crimson/osd/osdmap_service.h" |
10 | #include "crimson/osd/shard_services.h" | |
11 | #include "crimson/osd/osd_operations/pg_advance_map.h" | |
12 | #include "crimson/osd/osd_operation_external_tracking.h" | |
13 | #include "osd/PeeringState.h" | |
9f95a23c TL |
14 | |
15 | namespace { | |
16 | seastar::logger& logger() { | |
17 | return crimson::get_logger(ceph_subsys_osd); | |
18 | } | |
19 | } | |
20 | ||
21 | namespace crimson::osd { | |
22 | ||
23 | PGAdvanceMap::PGAdvanceMap( | |
1e59de90 | 24 | ShardServices &shard_services, Ref<PG> pg, epoch_t to, |
9f95a23c | 25 | PeeringCtx &&rctx, bool do_init) |
1e59de90 | 26 | : shard_services(shard_services), pg(pg), to(to), |
aee94f69 TL |
27 | rctx(std::move(rctx)), do_init(do_init) |
28 | { | |
29 | logger().debug("{}: created", *this); | |
30 | } | |
9f95a23c TL |
31 | |
32 | PGAdvanceMap::~PGAdvanceMap() {} | |
33 | ||
34 | void PGAdvanceMap::print(std::ostream &lhs) const | |
35 | { | |
36 | lhs << "PGAdvanceMap(" | |
37 | << "pg=" << pg->get_pgid() | |
1e59de90 | 38 | << " from=" << (from ? *from : -1) |
9f95a23c TL |
39 | << " to=" << to; |
40 | if (do_init) { | |
41 | lhs << " do_init"; | |
42 | } | |
43 | lhs << ")"; | |
44 | } | |
45 | ||
// Emit a structured description of this operation via the given Formatter.
void PGAdvanceMap::dump_detail(Formatter *f) const
{
  f->open_object_section("PGAdvanceMap");
  f->dump_stream("pgid") << pg->get_pgid();
  // `from` is only set once start() has begun; omit the field until then.
  if (from) {
    f->dump_int("from", *from);
  }
  f->dump_int("to", to);
  f->dump_bool("do_init", do_init);
  f->close_section();
}
57 | ||
aee94f69 TL |
// Accessor for the pg-owned peering pipeline; map advances enter its stages
// so they are presumably ordered with other peering-pipeline operations —
// confirm against the pipeline's other users.
PGPeeringPipeline &PGAdvanceMap::peering_pp(PG &pg)
{
  return pg.peering_request_pg_pipeline;
}
62 | ||
9f95a23c TL |
// Drive the pg from its current osdmap epoch up to `to`, one epoch at a
// time, then activate the final map and flush the peering context.
seastar::future<> PGAdvanceMap::start()
{
  using cached_map_t = OSDMapService::cached_map_t;

  logger().debug("{}: start", *this);

  // Pin this operation alive until the final continuation has run.
  IRef ref = this;
  return enter_stage<>(
    peering_pp(*pg).process
  ).then([this] {
    /*
     * PGAdvanceMap is scheduled at pg creation and when
     * broadcasting new osdmaps to pgs. We are not able to serialize
     * between the two different PGAdvanceMap callers since a new pg
     * will get advanced to the latest osdmap at it's creation.
     * As a result, we may need to adjust the PGAdvance operation
     * 'from' epoch.
     * See: https://tracker.ceph.com/issues/61744
     */
    from = pg->get_osdmap_epoch();
    auto fut = seastar::now();
    if (do_init) {
      // Freshly created pg: run its initialization and first map
      // activation before stepping epoch by epoch.
      fut = pg->handle_initialize(rctx
      ).then([this] {
        return pg->handle_activate_map(rctx);
      });
    }
    return fut.then([this] {
      ceph_assert(std::cmp_less_equal(*from, to));
      // Fetch and apply every map in the half-open range (*from, to],
      // strictly in epoch order.
      return seastar::do_for_each(
        boost::make_counting_iterator(*from + 1),
        boost::make_counting_iterator(to + 1),
        [this](epoch_t next_epoch) {
          logger().debug("{}: start: getting map {}",
                         *this, next_epoch);
          return shard_services.get_map(next_epoch).then(
            [this] (cached_map_t&& next_map) {
              logger().debug("{}: advancing map to {}",
                             *this, next_map->get_epoch());
              return pg->handle_advance_map(next_map, rctx);
            });
        }).then([this] {
          return pg->handle_activate_map(rctx).then([this] {
            logger().debug("{}: map activated", *this);
            if (do_init) {
              // Register the new pg with the shard now that it has
              // caught up to the target epoch.
              shard_services.pg_created(pg->get_pgid(), pg);
              logger().info("PGAdvanceMap::start new pg {}", *pg);
            }
            // In parallel: report aliveness to the monitor if the pg
            // needs up_thru updated, and dispatch the accumulated
            // peering context (rctx is consumed here).
            return seastar::when_all_succeed(
              pg->get_need_up_thru()
              ? shard_services.send_alive(
                  pg->get_same_interval_since())
              : seastar::now(),
              shard_services.dispatch_context(
                pg->get_collection_ref(),
                std::move(rctx)));
          });
        }).then_unpack([this] {
          logger().debug("{}: sending pg temp", *this);
          return shard_services.send_pg_temp();
        });
    });
  }).then([this, ref=std::move(ref)] {
    logger().debug("{}: complete", *this);
  });
}
129 | ||
130 | } |