]> git.proxmox.com Git - ceph.git/blob - ceph/src/crimson/os/alienstore/thread_pool.h
import quincy beta 17.1.0
[ceph.git] / ceph / src / crimson / os / alienstore / thread_pool.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
2 // vim: ts=8 sw=2 smarttab expandtab
3 #pragma once
4
5 #include <atomic>
6 #include <condition_variable>
7 #include <tuple>
8 #include <type_traits>
9 #include <boost/lockfree/queue.hpp>
10 #include <boost/optional.hpp>
11 #include <seastar/core/future.hh>
12 #include <seastar/core/gate.hh>
13 #include <seastar/core/reactor.hh>
14 #include <seastar/core/semaphore.hh>
15 #include <seastar/core/sharded.hh>
16
17 #if __cplusplus > 201703L
18 #include <semaphore>
19 namespace crimson {
20 using std::counting_semaphore;
21 }
22 #else
23 #include "semaphore.h"
24 #endif
25
26 namespace crimson::os {
27
/// Type-erased unit of work handed from a seastar reactor to an alien
/// (non-seastar) worker thread; concrete tasks implement process().
struct WorkItem {
  virtual ~WorkItem() = default;
  virtual void process() = 0;
};
32
/// Wraps a callable as a WorkItem whose completion can be awaited from a
/// seastar fiber: an alien worker thread runs the callable and then signals
/// an eventfd that the reactor side is waiting on.
template<typename Func>
struct Task final : WorkItem {
  // result type of invoking the callable with no arguments
  using T = std::invoke_result_t<Func>;
  // map a void result onto seastar's empty stored type so a single
  // future_state member can hold either case
  using future_stored_type_t =
    std::conditional_t<std::is_void_v<T>,
                       seastar::internal::future_stored_type_t<>,
                       seastar::internal::future_stored_type_t<T>>;
  using futurator_t = seastar::futurize<T>;
public:
  explicit Task(Func&& f)
    : func(std::move(f))
  {}
  /// Runs on the alien worker thread: invoke the callable, record its
  /// result (or the thrown exception) in `state`, then wake the reactor
  /// side.  The state is fully written before the eventfd is signalled,
  /// so get_future()'s continuation reads a completed state.
  void process() override {
    try {
      if constexpr (std::is_void_v<T>) {
        func();
        state.set();
      } else {
        state.set(func());
      }
    } catch (...) {
      state.set_exception(std::current_exception());
    }
    // cross-thread wakeup of the fiber blocked in get_future()
    on_done.write_side().signal(1);
  }
  /// Awaited on the reactor side; resolves once process() has signalled,
  /// forwarding either the stored value or the stored exception.
  typename futurator_t::type get_future() {
    return on_done.wait().then([this](size_t) {
      if (state.failed()) {
        return futurator_t::make_exception_future(state.get_exception());
      } else {
        return futurator_t::from_tuple(state.get_value());
      }
    });
  }
private:
  Func func;
  seastar::future_state<future_stored_type_t> state;
  // each Task owns an eventfd, i.e. an open file descriptor — hence the
  // note on ThreadPool about bounding the pending-queue depth
  seastar::readable_eventfd on_done;
};
72
73 struct SubmitQueue {
74 seastar::semaphore free_slots;
75 seastar::gate pending_tasks;
76 explicit SubmitQueue(size_t num_free_slots)
77 : free_slots(num_free_slots)
78 {}
79 seastar::future<> stop() {
80 return pending_tasks.close();
81 }
82 };
83
84 struct ShardedWorkQueue {
85 public:
86 WorkItem* pop_front(std::chrono::milliseconds& queue_max_wait) {
87 if (sem.try_acquire_for(queue_max_wait)) {
88 if (!is_stopping()) {
89 WorkItem* work_item = nullptr;
90 [[maybe_unused]] bool popped = pending.pop(work_item);
91 assert(popped);
92 return work_item;
93 }
94 }
95 return nullptr;
96 }
97 void stop() {
98 stopping = true;
99 sem.release();
100 }
101 void push_back(WorkItem* work_item) {
102 [[maybe_unused]] bool pushed = pending.push(work_item);
103 assert(pushed);
104 sem.release();
105 }
106 private:
107 bool is_stopping() const {
108 return stopping;
109 }
110 std::atomic<bool> stopping = false;
111 static constexpr unsigned QUEUE_SIZE = 128;
112 crimson::counting_semaphore<QUEUE_SIZE> sem{0};
113 boost::lockfree::queue<WorkItem*> pending{QUEUE_SIZE};
114 };
115
116 /// an engine for scheduling non-seastar tasks from seastar fibers
117 class ThreadPool {
118 public:
119 /**
120 * @param queue_sz the depth of pending queue. before a task is scheduled,
121 * it waits in this queue. we will round this number to
122 * multiple of the number of cores.
123 * @param n_threads the number of threads in this thread pool.
124 * @param cpu the CPU core to which this thread pool is assigned
125 * @note each @c Task has its own crimson::thread::Condition, which possesses
126 * an fd, so we should keep the size of queue under a reasonable limit.
127 */
128 ThreadPool(size_t n_threads, size_t queue_sz, std::vector<uint64_t> cpus);
129 ~ThreadPool();
130 seastar::future<> start();
131 seastar::future<> stop();
132 size_t size() {
133 return n_threads;
134 }
135 template<typename Func, typename...Args>
136 auto submit(int shard, Func&& func, Args&&... args) {
137 auto packaged = [func=std::move(func),
138 args=std::forward_as_tuple(args...)] {
139 return std::apply(std::move(func), std::move(args));
140 };
141 return seastar::with_gate(submit_queue.local().pending_tasks,
142 [packaged=std::move(packaged), shard, this] {
143 return local_free_slots().wait()
144 .then([packaged=std::move(packaged), shard, this] {
145 auto task = new Task{std::move(packaged)};
146 auto fut = task->get_future();
147 pending_queues[shard].push_back(task);
148 return fut.finally([task, this] {
149 local_free_slots().signal();
150 delete task;
151 });
152 });
153 });
154 }
155
156 template<typename Func>
157 auto submit(Func&& func) {
158 return submit(::rand() % n_threads, std::forward<Func>(func));
159 }
160
161 private:
162 void loop(std::chrono::milliseconds queue_max_wait, size_t shard);
163 bool is_stopping() const {
164 return stopping.load(std::memory_order_relaxed);
165 }
166 static void pin(const std::vector<uint64_t>& cpus);
167 static void block_sighup();
168 seastar::semaphore& local_free_slots() {
169 return submit_queue.local().free_slots;
170 }
171 ThreadPool(const ThreadPool&) = delete;
172 ThreadPool& operator=(const ThreadPool&) = delete;
173
174 private:
175 size_t n_threads;
176 std::atomic<bool> stopping = false;
177 std::vector<std::thread> threads;
178 seastar::sharded<SubmitQueue> submit_queue;
179 const size_t queue_size;
180 std::vector<ShardedWorkQueue> pending_queues;
181 };
182
183 } // namespace crimson::os