]> git.proxmox.com Git - ceph.git/blob - ceph/src/crimson/common/smp_helpers.h
c2b7bd9641a77d33390f50b991c24d536370d38d
[ceph.git] / ceph / src / crimson / common / smp_helpers.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #pragma once
5
6 #include <limits>
7
8 #include <seastar/core/smp.hh>
9
10 #include "crimson/common/errorator.h"
11 #include "crimson/common/utility.h"
12
13 namespace crimson {
14
15 using core_id_t = seastar::shard_id;
16 static constexpr core_id_t NULL_CORE = std::numeric_limits<core_id_t>::max();
17
18 auto submit_to(core_id_t core, auto &&f) {
19 using ret_type = decltype(f());
20 if constexpr (is_errorated_future_v<ret_type>) {
21 auto ret = seastar::smp::submit_to(
22 core,
23 [f=std::move(f)]() mutable {
24 return f().to_base();
25 });
26 return ret_type(std::move(ret));
27 } else {
28 return seastar::smp::submit_to(core, std::move(f));
29 }
30 }
31
32 template <typename Obj, typename Method, typename... Args>
33 auto proxy_method_on_core(
34 core_id_t core, Obj &obj, Method method, Args&&... args) {
35 return crimson::submit_to(
36 core,
37 [&obj, method,
38 arg_tuple=std::make_tuple(std::forward<Args>(args)...)]() mutable {
39 return apply_method_to_tuple(obj, method, std::move(arg_tuple));
40 });
41 }
42
43 /**
44 * reactor_map_seq
45 *
 * Invokes f on each reactor sequentially.  Callers may assume that
 * f will not be invoked concurrently on multiple cores.
48 */
49 template <typename F>
50 auto reactor_map_seq(F &&f) {
51 using ret_type = decltype(f());
52 if constexpr (is_errorated_future_v<ret_type>) {
53 auto ret = crimson::do_for_each(
54 seastar::smp::all_cpus().begin(),
55 seastar::smp::all_cpus().end(),
56 [f=std::move(f)](auto core) mutable {
57 return seastar::smp::submit_to(
58 core,
59 [&f] {
60 return std::invoke(f);
61 });
62 });
63 return ret_type(ret);
64 } else {
65 return seastar::do_for_each(
66 seastar::smp::all_cpus().begin(),
67 seastar::smp::all_cpus().end(),
68 [f=std::move(f)](auto core) mutable {
69 return seastar::smp::submit_to(
70 core,
71 [&f] {
72 return std::invoke(f);
73 });
74 });
75 }
76 }
77
78 /**
79 * sharded_map_seq
80 *
81 * Invokes f on each shard of t sequentially. Caller may assume that
82 * f will not be invoked concurrently on multiple cores.
83 */
/**
 * sharded_map_seq
 *
 * Invokes f on each shard of t sequentially.  Caller may assume that
 * f will not be invoked concurrently on multiple cores.
 */
template <typename T, typename F>
auto sharded_map_seq(T &t, F &&f) {
  auto visit_local = [&t, f=std::forward<F>(f)]() mutable {
    return std::invoke(f, t.local());
  };
  return reactor_map_seq(std::move(visit_local));
}
91
92 }