git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/examples/vm_power_manager/power_manager.c
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / spdk / dpdk / examples / vm_power_manager / power_manager.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/un.h>
10 #include <fcntl.h>
11 #include <unistd.h>
12 #include <dirent.h>
13 #include <errno.h>
14
15 #include <sys/sysinfo.h>
16 #include <sys/types.h>
17
18 #include <rte_log.h>
19 #include <rte_power.h>
20 #include <rte_spinlock.h>
21
22 #include "channel_manager.h"
23 #include "power_manager.h"
24 #include "oob_monitor.h"
25
/*
 * Apply the frequency operation rte_power_freq_<DIRECTION>() to one core,
 * taking the core's spinlock around the library call. Expands inside a
 * function returning int: causes an early `return -1` when the core number
 * is out of range or the core is not enabled for management; otherwise
 * stores the library call's result in `ret`.
 *
 * NOTE: macro arguments are parenthesized at each use so that expression
 * arguments expand safely (standard function-like-macro hygiene).
 */
#define POWER_SCALE_CORE(DIRECTION, core_num, ret) do { \
	if ((core_num) >= ci.core_count) \
		return -1; \
	if (!(ci.cd[(core_num)].global_enabled_cpus)) \
		return -1; \
	rte_spinlock_lock(&global_core_freq_info[(core_num)].power_sl); \
	ret = rte_power_freq_##DIRECTION((core_num)); \
	rte_spinlock_unlock(&global_core_freq_info[(core_num)].power_sl); \
} while (0)
35
/*
 * Apply rte_power_freq_<DIRECTION>() to every core whose bit is set in
 * `core_mask` (a uint64_t, bit N == core N), skipping cores that are not
 * enabled for management. Sets `ret` to -1 if the operation fails on any
 * core; each per-core call is made under that core's spinlock.
 *
 * FIX: the bit-clearing expression must shift an unsigned 64-bit one
 * (`1ULL << i`). The original `~(1 << i)` shifted a plain int, which is
 * undefined behavior for i >= 31 and cannot clear mask bits 32..63,
 * risking an infinite loop for masks with high bits set.
 */
#define POWER_SCALE_MASK(DIRECTION, core_mask, ret) do { \
	int i; \
	for (i = 0; (core_mask); (core_mask) &= ~(1ULL << i++)) { \
		if (((core_mask) >> i) & 1) { \
			if (!(ci.cd[i].global_enabled_cpus)) \
				continue; \
			rte_spinlock_lock(&global_core_freq_info[i].power_sl); \
			if (rte_power_freq_##DIRECTION(i) != 1) \
				ret = -1; \
			rte_spinlock_unlock(&global_core_freq_info[i].power_sl); \
		} \
	} \
} while (0)
49
/*
 * Cached frequency table for one core, protected by a per-core spinlock.
 * Populated by power_manager_init() via rte_power_freqs().
 */
50 struct freq_info {
51 rte_spinlock_t power_sl;
52 uint32_t freqs[RTE_MAX_LCORE_FREQS];
53 unsigned num_freqs;
54 } __rte_cache_aligned;
55 
/* One cached frequency table per possible host CPU slot. */
56 static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];
57 
/* Global core bookkeeping singleton; accessed via get_core_info(). */
58 struct core_info ci;
59 
/* sysfs printf-format path for reading a CPU's physical core id. */
60 #define SYSFS_CPU_PATH "/sys/devices/system/cpu/cpu%u/topology/core_id"
61
62 struct core_info *
63 get_core_info(void)
64 {
65 return &ci;
66 }
67
68 int
69 core_info_init(void)
70 {
71 struct core_info *ci;
72 int i;
73
74 ci = get_core_info();
75
76 ci->core_count = get_nprocs_conf();
77 ci->branch_ratio_threshold = BRANCH_RATIO_THRESHOLD;
78 ci->cd = malloc(ci->core_count * sizeof(struct core_details));
79 if (!ci->cd) {
80 RTE_LOG(ERR, POWER_MANAGER, "Failed to allocate memory for core info.");
81 return -1;
82 }
83 for (i = 0; i < ci->core_count; i++) {
84 ci->cd[i].global_enabled_cpus = 1;
85 ci->cd[i].oob_enabled = 0;
86 ci->cd[i].msr_fd = 0;
87 }
88 printf("%d cores in system\n", ci->core_count);
89 return 0;
90 }
91
/*
 * Initialise the rte_power library (ACPI cpufreq backend) for every core
 * that is enabled for management, cache each core's frequency table, and
 * register OOB-enabled cores with the out-of-band monitor.
 *
 * Returns 0 on success, -1 if a frequency list could not be fetched for
 * any core. Note a rte_power_init() failure is only logged; the core is
 * still counted and its frequency list is still queried.
 */
92 int
93 power_manager_init(void)
94 {
95 unsigned int i, num_cpus = 0, num_freqs = 0;
96 int ret = 0;
97 struct core_info *ci;
98 
/* Force the ACPI cpufreq power-management environment. */
99 rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
100 
101 ci = get_core_info();
102 if (!ci) {
103 RTE_LOG(ERR, POWER_MANAGER,
104 "Failed to get core info!\n");
105 return -1;
106 }
107 
108 for (i = 0; i < ci->core_count; i++) {
109 if (ci->cd[i].global_enabled_cpus) {
110 if (rte_power_init(i) < 0)
111 RTE_LOG(ERR, POWER_MANAGER,
112 "Unable to initialize power manager "
113 "for core %u\n", i);
114 num_cpus++;
/* Cache the core's available frequencies for later lookups. */
115 num_freqs = rte_power_freqs(i,
116 global_core_freq_info[i].freqs,
117 RTE_MAX_LCORE_FREQS);
118 if (num_freqs == 0) {
119 RTE_LOG(ERR, POWER_MANAGER,
120 "Unable to get frequency list for core %u\n",
121 i);
/* Without a frequency list, OOB scaling is useless here. */
122 ci->cd[i].oob_enabled = 0;
123 ret = -1;
124 }
125 global_core_freq_info[i].num_freqs = num_freqs;
126 
127 rte_spinlock_init(&global_core_freq_info[i].power_sl);
128 }
/* OOB registration also applies to cores not managed above. */
129 if (ci->cd[i].oob_enabled)
130 add_core_to_monitor(i);
131 }
132 RTE_LOG(INFO, POWER_MANAGER, "Managing %u cores out of %u available host cores\n",
133 num_cpus, ci->core_count);
134 return ret;
135 
136 }
137
138 uint32_t
139 power_manager_get_current_frequency(unsigned core_num)
140 {
141 uint32_t freq, index;
142
143 if (core_num >= POWER_MGR_MAX_CPUS) {
144 RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
145 core_num, POWER_MGR_MAX_CPUS-1);
146 return -1;
147 }
148 if (!(ci.cd[core_num].global_enabled_cpus))
149 return 0;
150
151 rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
152 index = rte_power_get_freq(core_num);
153 rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
154 if (index >= POWER_MGR_MAX_CPUS)
155 freq = 0;
156 else
157 freq = global_core_freq_info[core_num].freqs[index];
158
159 return freq;
160 }
161
162 int
163 power_manager_exit(void)
164 {
165 unsigned int i;
166 int ret = 0;
167 struct core_info *ci;
168
169 ci = get_core_info();
170 if (!ci) {
171 RTE_LOG(ERR, POWER_MANAGER,
172 "Failed to get core info!\n");
173 return -1;
174 }
175
176 for (i = 0; i < ci->core_count; i++) {
177 if (ci->cd[i].global_enabled_cpus) {
178 if (rte_power_exit(i) < 0) {
179 RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
180 "for core %u\n", i);
181 ret = -1;
182 }
183 ci->cd[i].global_enabled_cpus = 0;
184 }
185 remove_core_from_monitor(i);
186 }
187 return ret;
188 }
189
190 int
191 power_manager_scale_mask_up(uint64_t core_mask)
192 {
193 int ret = 0;
194
195 POWER_SCALE_MASK(up, core_mask, ret);
196 return ret;
197 }
198
199 int
200 power_manager_scale_mask_down(uint64_t core_mask)
201 {
202 int ret = 0;
203
204 POWER_SCALE_MASK(down, core_mask, ret);
205 return ret;
206 }
207
208 int
209 power_manager_scale_mask_min(uint64_t core_mask)
210 {
211 int ret = 0;
212
213 POWER_SCALE_MASK(min, core_mask, ret);
214 return ret;
215 }
216
217 int
218 power_manager_scale_mask_max(uint64_t core_mask)
219 {
220 int ret = 0;
221
222 POWER_SCALE_MASK(max, core_mask, ret);
223 return ret;
224 }
225
226 int
227 power_manager_enable_turbo_mask(uint64_t core_mask)
228 {
229 int ret = 0;
230
231 POWER_SCALE_MASK(enable_turbo, core_mask, ret);
232 return ret;
233 }
234
235 int
236 power_manager_disable_turbo_mask(uint64_t core_mask)
237 {
238 int ret = 0;
239
240 POWER_SCALE_MASK(disable_turbo, core_mask, ret);
241 return ret;
242 }
243
244 int
245 power_manager_scale_core_up(unsigned core_num)
246 {
247 int ret = 0;
248
249 POWER_SCALE_CORE(up, core_num, ret);
250 return ret;
251 }
252
253 int
254 power_manager_scale_core_down(unsigned core_num)
255 {
256 int ret = 0;
257
258 POWER_SCALE_CORE(down, core_num, ret);
259 return ret;
260 }
261
262 int
263 power_manager_scale_core_min(unsigned core_num)
264 {
265 int ret = 0;
266
267 POWER_SCALE_CORE(min, core_num, ret);
268 return ret;
269 }
270
271 int
272 power_manager_scale_core_max(unsigned core_num)
273 {
274 int ret = 0;
275
276 POWER_SCALE_CORE(max, core_num, ret);
277 return ret;
278 }
279
280 int
281 power_manager_enable_turbo_core(unsigned int core_num)
282 {
283 int ret = 0;
284
285 POWER_SCALE_CORE(enable_turbo, core_num, ret);
286 return ret;
287 }
288
289 int
290 power_manager_disable_turbo_core(unsigned int core_num)
291 {
292 int ret = 0;
293
294 POWER_SCALE_CORE(disable_turbo, core_num, ret);
295 return ret;
296 }
297
298 int
299 power_manager_scale_core_med(unsigned int core_num)
300 {
301 int ret = 0;
302 struct core_info *ci;
303
304 ci = get_core_info();
305 if (core_num >= POWER_MGR_MAX_CPUS)
306 return -1;
307 if (!(ci->cd[core_num].global_enabled_cpus))
308 return -1;
309 rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
310 ret = rte_power_set_freq(core_num,
311 global_core_freq_info[core_num].num_freqs / 2);
312 rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
313 return ret;
314 }