/*
 * This file is open source software, licensed to you under the terms
 * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
 * distributed with this work for additional information regarding copyright
 * ownership. You may not use this file except in compliance with the License.
 *
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*
 * Copyright (C) 2017 ScyllaDB Ltd.
 */

#include <algorithm>
#include <vector>
#include <chrono>

#include <seastar/core/thread.hh>
#include <seastar/testing/test_case.hh>
#include <seastar/testing/thread_test_case.hh>
#include <seastar/testing/test_runner.hh>
#include <seastar/core/execution_stage.hh>
#include <seastar/core/sleep.hh>
#include <seastar/core/print.hh>
#include <seastar/core/scheduling_specific.hh>
#include <seastar/core/smp.hh>
#include <seastar/core/with_scheduling_group.hh>
#include <seastar/core/reactor.hh>
#include <seastar/util/later.hh>
#include <seastar/util/defer.hh>

using namespace std::chrono_literals;

using namespace seastar;

/**
 * Test setting primitive and object as a value after all groups are created
 */
47 | SEASTAR_THREAD_TEST_CASE(sg_specific_values_define_after_sg_create) { | |
48 | using ivec = std::vector<int>; | |
49 | const int num_scheduling_groups = 4; | |
50 | std::vector<scheduling_group> sgs; | |
51 | for (int i = 0; i < num_scheduling_groups; i++) { | |
52 | sgs.push_back(create_scheduling_group(format("sg{}", i).c_str(), 100).get0()); | |
53 | } | |
54 | ||
20effc67 | 55 | const auto destroy_scheduling_groups = defer([&sgs] () noexcept { |
9f95a23c TL |
56 | for (scheduling_group sg : sgs) { |
57 | destroy_scheduling_group(sg).get(); | |
58 | } | |
59 | }); | |
60 | scheduling_group_key_config key1_conf = make_scheduling_group_key_config<int>(); | |
61 | scheduling_group_key key1 = scheduling_group_key_create(key1_conf).get0(); | |
62 | ||
63 | scheduling_group_key_config key2_conf = make_scheduling_group_key_config<ivec>(); | |
64 | scheduling_group_key key2 = scheduling_group_key_create(key2_conf).get0(); | |
65 | ||
66 | smp::invoke_on_all([key1, key2, &sgs] () { | |
f67539c2 | 67 | int factor = this_shard_id() + 1; |
9f95a23c TL |
68 | for (int i=0; i < num_scheduling_groups; i++) { |
69 | sgs[i].get_specific<int>(key1) = (i + 1) * factor; | |
70 | sgs[i].get_specific<ivec>(key2).push_back((i + 1) * factor); | |
71 | } | |
72 | ||
73 | for (int i=0; i < num_scheduling_groups; i++) { | |
74 | BOOST_REQUIRE_EQUAL(sgs[i].get_specific<int>(key1) = (i + 1) * factor, (i + 1) * factor); | |
75 | BOOST_REQUIRE_EQUAL(sgs[i].get_specific<ivec>(key2)[0], (i + 1) * factor); | |
76 | } | |
77 | ||
78 | }).get(); | |
79 | ||
80 | smp::invoke_on_all([key1, key2] () { | |
81 | return reduce_scheduling_group_specific<int>(std::plus<int>(), int(0), key1).then([] (int sum) { | |
f67539c2 | 82 | int factor = this_shard_id() + 1; |
9f95a23c TL |
83 | int expected_sum = ((1 + num_scheduling_groups)*num_scheduling_groups) * factor /2; |
84 | BOOST_REQUIRE_EQUAL(expected_sum, sum); | |
85 | }). then([key2] { | |
86 | auto ivec_to_int = [] (ivec& v) { | |
87 | return v.size() ? v[0] : 0; | |
88 | }; | |
89 | ||
90 | return map_reduce_scheduling_group_specific<ivec>(ivec_to_int, std::plus<int>(), int(0), key2).then([] (int sum) { | |
f67539c2 | 91 | int factor = this_shard_id() + 1; |
9f95a23c TL |
92 | int expected_sum = ((1 + num_scheduling_groups)*num_scheduling_groups) * factor /2; |
93 | BOOST_REQUIRE_EQUAL(expected_sum, sum); | |
94 | }); | |
95 | ||
96 | }); | |
97 | }).get(); | |
98 | ||
99 | ||
100 | } | |

/**
 * Test setting primitive and object as a value before all groups are created
 */
105 | SEASTAR_THREAD_TEST_CASE(sg_specific_values_define_before_sg_create) { | |
106 | using ivec = std::vector<int>; | |
107 | const int num_scheduling_groups = 4; | |
108 | std::vector<scheduling_group> sgs; | |
20effc67 | 109 | const auto destroy_scheduling_groups = defer([&sgs] () noexcept { |
9f95a23c TL |
110 | for (scheduling_group sg : sgs) { |
111 | destroy_scheduling_group(sg).get(); | |
112 | } | |
113 | }); | |
114 | scheduling_group_key_config key1_conf = make_scheduling_group_key_config<int>(); | |
115 | scheduling_group_key key1 = scheduling_group_key_create(key1_conf).get0(); | |
116 | ||
117 | scheduling_group_key_config key2_conf = make_scheduling_group_key_config<ivec>(); | |
118 | scheduling_group_key key2 = scheduling_group_key_create(key2_conf).get0(); | |
119 | ||
120 | for (int i = 0; i < num_scheduling_groups; i++) { | |
121 | sgs.push_back(create_scheduling_group(format("sg{}", i).c_str(), 100).get0()); | |
122 | } | |
123 | ||
124 | smp::invoke_on_all([key1, key2, &sgs] () { | |
f67539c2 | 125 | int factor = this_shard_id() + 1; |
9f95a23c TL |
126 | for (int i=0; i < num_scheduling_groups; i++) { |
127 | sgs[i].get_specific<int>(key1) = (i + 1) * factor; | |
128 | sgs[i].get_specific<ivec>(key2).push_back((i + 1) * factor); | |
129 | } | |
130 | ||
131 | for (int i=0; i < num_scheduling_groups; i++) { | |
132 | BOOST_REQUIRE_EQUAL(sgs[i].get_specific<int>(key1) = (i + 1) * factor, (i + 1) * factor); | |
133 | BOOST_REQUIRE_EQUAL(sgs[i].get_specific<ivec>(key2)[0], (i + 1) * factor); | |
134 | } | |
135 | ||
136 | }).get(); | |
137 | ||
138 | smp::invoke_on_all([key1, key2] () { | |
139 | return reduce_scheduling_group_specific<int>(std::plus<int>(), int(0), key1).then([] (int sum) { | |
f67539c2 | 140 | int factor = this_shard_id() + 1; |
9f95a23c TL |
141 | int expected_sum = ((1 + num_scheduling_groups)*num_scheduling_groups) * factor /2; |
142 | BOOST_REQUIRE_EQUAL(expected_sum, sum); | |
143 | }). then([key2] { | |
144 | auto ivec_to_int = [] (ivec& v) { | |
145 | return v.size() ? v[0] : 0; | |
146 | }; | |
147 | ||
148 | return map_reduce_scheduling_group_specific<ivec>(ivec_to_int, std::plus<int>(), int(0), key2).then([] (int sum) { | |
f67539c2 | 149 | int factor = this_shard_id() + 1; |
9f95a23c TL |
150 | int expected_sum = ((1 + num_scheduling_groups)*num_scheduling_groups) * factor /2; |
151 | BOOST_REQUIRE_EQUAL(expected_sum, sum); | |
152 | }); | |
153 | ||
154 | }); | |
155 | }).get(); | |
156 | ||
157 | } | |

/**
 * Test setting primitive and an object as a value before some groups are created
 * and after some of the groups are created.
 */
163 | SEASTAR_THREAD_TEST_CASE(sg_specific_values_define_before_and_after_sg_create) { | |
164 | using ivec = std::vector<int>; | |
165 | const int num_scheduling_groups = 4; | |
166 | std::vector<scheduling_group> sgs; | |
20effc67 | 167 | const auto destroy_scheduling_groups = defer([&sgs] () noexcept { |
9f95a23c TL |
168 | for (scheduling_group sg : sgs) { |
169 | destroy_scheduling_group(sg).get(); | |
170 | } | |
171 | }); | |
172 | ||
173 | for (int i = 0; i < num_scheduling_groups/2; i++) { | |
174 | sgs.push_back(create_scheduling_group(format("sg{}", i).c_str(), 100).get0()); | |
175 | } | |
176 | scheduling_group_key_config key1_conf = make_scheduling_group_key_config<int>(); | |
177 | scheduling_group_key key1 = scheduling_group_key_create(key1_conf).get0(); | |
178 | ||
179 | scheduling_group_key_config key2_conf = make_scheduling_group_key_config<ivec>(); | |
180 | scheduling_group_key key2 = scheduling_group_key_create(key2_conf).get0(); | |
181 | ||
182 | for (int i = num_scheduling_groups/2; i < num_scheduling_groups; i++) { | |
183 | sgs.push_back(create_scheduling_group(format("sg{}", i).c_str(), 100).get0()); | |
184 | } | |
185 | ||
186 | smp::invoke_on_all([key1, key2, &sgs] () { | |
f67539c2 | 187 | int factor = this_shard_id() + 1; |
9f95a23c TL |
188 | for (int i=0; i < num_scheduling_groups; i++) { |
189 | sgs[i].get_specific<int>(key1) = (i + 1) * factor; | |
190 | sgs[i].get_specific<ivec>(key2).push_back((i + 1) * factor); | |
191 | } | |
192 | ||
193 | for (int i=0; i < num_scheduling_groups; i++) { | |
194 | BOOST_REQUIRE_EQUAL(sgs[i].get_specific<int>(key1) = (i + 1) * factor, (i + 1) * factor); | |
195 | BOOST_REQUIRE_EQUAL(sgs[i].get_specific<ivec>(key2)[0], (i + 1) * factor); | |
196 | } | |
197 | ||
198 | }).get(); | |
199 | ||
200 | smp::invoke_on_all([key1, key2] () { | |
201 | return reduce_scheduling_group_specific<int>(std::plus<int>(), int(0), key1).then([] (int sum) { | |
f67539c2 | 202 | int factor = this_shard_id() + 1; |
9f95a23c TL |
203 | int expected_sum = ((1 + num_scheduling_groups)*num_scheduling_groups) * factor /2; |
204 | BOOST_REQUIRE_EQUAL(expected_sum, sum); | |
205 | }). then([key2] { | |
206 | auto ivec_to_int = [] (ivec& v) { | |
207 | return v.size() ? v[0] : 0; | |
208 | }; | |
209 | ||
210 | return map_reduce_scheduling_group_specific<ivec>(ivec_to_int, std::plus<int>(), int(0), key2).then([] (int sum) { | |
f67539c2 | 211 | int factor = this_shard_id() + 1; |
9f95a23c TL |
212 | int expected_sum = ((1 + num_scheduling_groups)*num_scheduling_groups) * factor /2; |
213 | BOOST_REQUIRE_EQUAL(expected_sum, sum); | |
214 | }); | |
215 | ||
216 | }); | |
217 | }).get(); | |
218 | } | |

/*
 * Test that current scheduling group is inherited by seastar::async()
 */
223 | SEASTAR_THREAD_TEST_CASE(sg_scheduling_group_inheritance_in_seastar_async_test) { | |
224 | scheduling_group sg = create_scheduling_group("sg0", 100).get0(); | |
20effc67 | 225 | auto cleanup = defer([&] () noexcept { destroy_scheduling_group(sg).get(); }); |
9f95a23c TL |
226 | thread_attributes attr = {}; |
227 | attr.sched_group = sg; | |
228 | seastar::async(attr, [attr] { | |
229 | BOOST_REQUIRE_EQUAL(internal::scheduling_group_index(current_scheduling_group()), | |
230 | internal::scheduling_group_index(*(attr.sched_group))); | |
231 | ||
232 | seastar::async([attr] { | |
233 | BOOST_REQUIRE_EQUAL(internal::scheduling_group_index(current_scheduling_group()), | |
234 | internal::scheduling_group_index(*(attr.sched_group))); | |
235 | ||
236 | smp::invoke_on_all([sched_group_idx = internal::scheduling_group_index(*(attr.sched_group))] () { | |
237 | BOOST_REQUIRE_EQUAL(internal::scheduling_group_index(current_scheduling_group()), sched_group_idx); | |
238 | }).get(); | |
239 | }).get(); | |
240 | }).get(); | |
241 | } | |
244 | SEASTAR_THREAD_TEST_CASE(later_preserves_sg) { | |
245 | scheduling_group sg = create_scheduling_group("sg", 100).get0(); | |
20effc67 | 246 | auto cleanup = defer([&] () noexcept { destroy_scheduling_group(sg).get(); }); |
f67539c2 TL |
247 | with_scheduling_group(sg, [&] { |
248 | return later().then([&] { | |
249 | BOOST_REQUIRE_EQUAL( | |
250 | internal::scheduling_group_index(current_scheduling_group()), | |
251 | internal::scheduling_group_index(sg)); | |
252 | }); | |
253 | }).get(); | |
254 | } | |
256 | SEASTAR_THREAD_TEST_CASE(sg_count) { | |
257 | class scheduling_group_destroyer { | |
258 | scheduling_group _sg; | |
259 | public: | |
260 | scheduling_group_destroyer(scheduling_group sg) : _sg(sg) {} | |
261 | ~scheduling_group_destroyer() { | |
262 | destroy_scheduling_group(_sg).get(); | |
263 | } | |
264 | }; | |
265 | ||
266 | std::vector<scheduling_group_destroyer> scheduling_groups_deferred_cleanup; | |
267 | // The line below is necessary in order to skip support of copy and move construction of scheduling_group_destroyer. | |
268 | scheduling_groups_deferred_cleanup.reserve(max_scheduling_groups()); | |
269 | // try to create 3 groups too many. | |
270 | for (auto i = internal::scheduling_group_count(); i < max_scheduling_groups() + 3; i++) { | |
271 | try { | |
272 | BOOST_REQUIRE_LE(internal::scheduling_group_count(), max_scheduling_groups()); | |
273 | scheduling_groups_deferred_cleanup.emplace_back(create_scheduling_group(format("sg_{}", i), 10).get()); | |
274 | } catch (std::runtime_error& e) { | |
275 | // make sure it is the right exception. | |
276 | BOOST_REQUIRE_EQUAL(e.what(), "Scheduling group limit exceeded"); | |
277 | // make sure that the scheduling group count makes sense | |
278 | BOOST_REQUIRE_EQUAL(internal::scheduling_group_count(), max_scheduling_groups()); | |
279 | // make sure that we expect this exception at this point | |
280 | BOOST_REQUIRE_GE(i, max_scheduling_groups()); | |
281 | } | |
282 | } | |
283 | BOOST_REQUIRE_EQUAL(internal::scheduling_group_count(), max_scheduling_groups()); | |
284 | } |