// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from the PM QoS infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding or
 * not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a really
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 * The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * The multiplier is determined from the number of tasks waiting for IO
 * on this CPU: a value of 10 is added for each such waiting task (this
 * value is experimentally determined). The iowait count gives a
 * CPU-local, instantaneous input to the decision.
 */

struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

64b4ca5c 131static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
69d25870
AV
132{
133 int bucket = 0;
134
135 /*
136 * We keep two groups of stats; one with no
137 * IO pending, one without.
138 * This allows us to calculate
139 * E(duration)|iowait
140 */
64b4ca5c 141 if (nr_iowaiters)
69d25870
AV
142 bucket = BUCKETS/2;
143
144 if (duration < 10)
145 return bucket;
146 if (duration < 100)
147 return bucket + 1;
148 if (duration < 1000)
149 return bucket + 2;
150 if (duration < 10000)
151 return bucket + 3;
152 if (duration < 100000)
153 return bucket + 4;
154 return bucket + 5;
155}
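
/*
 * Worked example (illustrative numbers, not from the original source): an
 * expected sleep of 250 us with no tasks waiting for IO lands in bucket 2
 * (since 100 <= 250 < 1000); the same 250 us sleep with IO outstanding
 * lands in bucket BUCKETS/2 + 2 = 8, so the two situations accumulate
 * separate correction-factor histories.
 */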

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters)
{
	/* for IO wait tasks (per cpu!) we add 10x each */
	return 1 + 10 * nr_iowaiters;
}
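
/*
 * Worked example (illustrative numbers, not from the original source): with
 * two tasks waiting for IO on this CPU the multiplier is 1 + 10 * 2 = 21,
 * so a state whose exit latency is 50 us only becomes eligible once the
 * predicted idle time exceeds roughly 50 * 21 = 1050 us (menu_select()
 * divides predicted_us by this multiplier to cap latency_req).
 */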

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data,
					 unsigned int predicted_us)
{
	int i, divisor;
	unsigned int min, max, thresh, avg;
	uint64_t sum, variance;

	thresh = INT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	min = UINT_MAX;
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;

			if (value < min)
				min = value;
		}
	}

	/*
	 * If the result of the computation is going to be discarded anyway,
	 * avoid the computation altogether.
	 */
	if (min >= predicted_us)
		return UINT_MAX;

	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
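
/*
 * Worked example (illustrative numbers, not from the original source):
 * with the eight recorded intervals {98, 99, 100, 101, 102, 100, 99, 101}
 * the sum is 800, so avg = 800 >> INTERVAL_SHIFT = 100, and the summed
 * squared differences of 12 give variance = 12 >> INTERVAL_SHIFT = 1.
 * Since variance <= 400, the pattern is accepted and 100 us is returned
 * as the typical interval. If no stable subset of the samples is found,
 * UINT_MAX is returned and the caller falls back to the timer-based
 * estimate.
 */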

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = cpuidle_governor_latency_req(dev->cpu);
	int i;
	int idx;
	unsigned int interactivity_req;
	unsigned int predicted_us;
	unsigned long nr_iowaiters;
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	nr_iowaiters = nr_iowait_cpu(dev->cpu);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_us < drv->states[1].target_residency ||
	      latency_req < drv->states[1].exit_latency) &&
	     !drv->states[0].disabled && !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running.
		 */
		*stop_tick = false;
		return 0;
	}

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					     data->correction_factor[data->bucket],
					     RESOLUTION * DECAY);
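	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): if the next timer is 1000 us away and the correction
	 * factor for this bucket has converged to half of unity
	 * (RESOLUTION * DECAY / 2 = 4096), the line above predicts
	 * 1000 * 4096 / 8192 = 500 us of idle time.
	 */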
	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it. In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (predicted_us < TICK_USEC)
			predicted_us = ktime_to_us(delta_next);
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency > predicted_us) {
			/*
			 * Use a physical idle state, not busy polling, unless
			 * a timer is going to trigger soon enough.
			 */
			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
			    s->exit_latency <= latency_req &&
			    s->target_residency <= data->next_timer_us) {
				predicted_us = s->target_residency;
				idx = i;
				break;
			}
			if (predicted_us < TICK_USEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				predicted_us = drv->states[idx].target_residency;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency < TICK_USEC &&
			    s->target_residency <= ktime_to_us(delta_next))
				idx = i;

			return idx;
		}
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			predicted_us = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate. If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_us = data->next_timer_us;
	} else {
		/* measured value */
		measured_us = dev->last_residency;

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;
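
	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): with the bucket still at unity (8192), a sleep predicted
	 * from a 1000 us timer that actually lasted 500 us yields
	 * new_factor = 8192 - 8192/8 + 1024 * 500 / 1000 = 7680, i.e. the
	 * stored ratio moves one DECAY-th of the way from 1.0 toward 0.5.
	 */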

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor; unity is
	 * represented as RESOLUTION * DECAY because the factor is stored
	 * pre-scaled by both constants.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);