// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <[email protected]>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#include "gov.h"

#define BUCKETS 6
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 2 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Latency tolerance (from pmqos infrastructure)
 * These two factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we take the actual known "next timer event" time.
 *
 * Since there are other source of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, but it uses a set of
 * factors, not just a single factor. This stems from the realization that the
 * ratio is dependent on the order of magnitude of the expected duration; if we
 * expect 500 milliseconds of idle time the likelihood of getting an interrupt
 * very early is much higher than if we expect 50 micro seconds of idle time.
 * For this reason, menu keeps an array of 6 independent factors, that gets
 * indexed based on the magnitude of the expected duration.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices like mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and use them to estimate the duration of the next one.
 */

struct menu_device {
	int		needs_update;	/* menu_update() pending before next select */
	int		tick_wakeup;	/* last wakeup was the scheduler tick */

	u64		next_timer_ns;	/* time till next timer at last selection */
	unsigned int	bucket;		/* correction-factor bin used last time */
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];	/* recent measured idle durations (us) */
	int		interval_ptr;	/* ring-buffer index into intervals[] */
};

/*
 * Map an expected idle duration to one of the BUCKETS correction-factor
 * bins, using decade boundaries from 10 us up to 100 ms.
 */
static inline int which_bucket(u64 duration_ns)
{
	int bucket = 0;

	if (duration_ns < 10ULL * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100ULL * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000ULL * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000ULL * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000ULL * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

/* Record one measured idle interval (in us) in the repeating-pattern ring. */
static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
{
	/* Update the repeating-pattern data. */
	data->intervals[data->interval_ptr++] = interval_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	s64 value, min_thresh = -1, max_thresh = UINT_MAX;
	unsigned int max, min, divisor;
	u64 avg, variance, avg_sq;
	int i;

again:
	/* Compute the average and variance of past intervals. */
	max = 0;
	min = UINT_MAX;
	avg = 0;
	variance = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		value = data->intervals[i];
		/*
		 * Discard the samples outside the interval between the min and
		 * max thresholds.
		 */
		if (value <= min_thresh || value >= max_thresh)
			continue;

		divisor++;

		avg += value;
		variance += value * value;

		if (value > max)
			max = value;

		if (value < min)
			min = value;
	}

	if (!max)
		return UINT_MAX;

	if (divisor == INTERVALS) {
		/* Full sample set: divide by the power-of-2 count via shifts. */
		avg >>= INTERVAL_SHIFT;
		variance >>= INTERVAL_SHIFT;
	} else {
		do_div(avg, divisor);
		do_div(variance, divisor);
	}

	avg_sq = avg * avg;
	variance -= avg_sq;

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
		    variance <= 400)
			return avg;
	}

	/*
	 * If there are outliers, discard them by setting thresholds to exclude
	 * data points at a large enough distance from the average, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the last 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if (divisor * 4 <= INTERVALS * 3) {
		/*
		 * If there are sufficiently many data points still under
		 * consideration after the outliers have been eliminated,
		 * returning without a prediction would be a mistake because it
		 * is likely that the next interval will not exceed the current
		 * maximum, so return the latter in that case.
		 */
		if (divisor >= INTERVALS / 2)
			return max;

		return UINT_MAX;
	}

	/* Update the thresholds for the next round. */
	if (avg - min > max - avg)
		min_thresh = min;
	else
		max_thresh = max;

	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	u64 predicted_ns;
	ktime_t delta, delta_tick;
	int i, idx;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	} else if (!dev->last_residency_ns) {
		/*
		 * This happens when the driver rejects the previously selected
		 * idle state and returns an error, so update the recent
		 * intervals table to prevent invalid information from being
		 * used going forward.
		 */
		menu_update_intervals(data, UINT_MAX);
	}

	/* Find the shortest expected idle interval. */
	predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
	if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
		unsigned int timer_us;

		/* Determine the time till the closest timer. */
		delta = tick_nohz_get_sleep_length(&delta_tick);
		if (unlikely(delta < 0)) {
			delta = 0;
			delta_tick = 0;
		}

		data->next_timer_ns = delta;
		data->bucket = which_bucket(data->next_timer_ns);

		/* Round up the result for half microseconds. */
		timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
					data->next_timer_ns *
						data->correction_factor[data->bucket],
				   RESOLUTION * DECAY * NSEC_PER_USEC);
		/* Use the lowest expected idle interval to pick the idle state. */
		predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
	} else {
		/*
		 * Because the next timer event is not going to be determined
		 * in this case, assume that without the tick the closest timer
		 * will be in distant future and that the closest tick will occur
		 * after 1/2 of the tick period.
		 */
		data->next_timer_ns = KTIME_MAX;
		delta_tick = TICK_NSEC / 2;
		data->bucket = BUCKETS - 1;
	}

	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_ns < drv->states[1].target_residency_ns ||
	      latency_req < drv->states[1].exit_latency_ns) &&
	     !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running if state[0] is a
		 * polling one.
		 */
		*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
		return 0;
	}

	/*
	 * If the tick is already stopped, the cost of possible short idle
	 * duration misprediction is much higher, because the CPU may be stuck
	 * in a shallow idle state for a long time as a result of it. In that
	 * case, say we might mispredict and use the known time till the closest
	 * timer event for the idle state selection.
	 */
	if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
		predicted_ns = data->next_timer_ns;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->exit_latency_ns > latency_req)
			break;

		if (s->target_residency_ns <= predicted_ns) {
			idx = i;
			continue;
		}

		/*
		 * Use a physical idle state, not busy polling, unless a timer
		 * is going to trigger soon enough.
		 */
		if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
		    s->target_residency_ns <= data->next_timer_ns) {
			predicted_ns = s->target_residency_ns;
			idx = i;
			break;
		}

		if (predicted_ns < TICK_NSEC)
			break;

		if (!tick_nohz_tick_stopped()) {
			/*
			 * If the state selected so far is shallow, waking up
			 * early won't hurt, so retain the tick in that case and
			 * let the governor run again in the next iteration of
			 * the idle loop.
			 */
			predicted_ns = drv->states[idx].target_residency_ns;
			break;
		}

		/*
		 * If the state selected so far is shallow and this state's
		 * target residency matches the time till the closest timer
		 * event, select this one to avoid getting stuck in the shallow
		 * one for too long.
		 */
		if (drv->states[idx].target_residency_ns < TICK_NSEC &&
		    s->target_residency_ns <= delta_tick)
			idx = i;

		return idx;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency_ns <= delta_tick)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	dev->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = dev->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	u64 measured_ns;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup begun, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_ns = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate. If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_ns = data->next_timer_ns;
	} else {
		/* measured value */
		measured_ns = dev->last_residency_ns;

		/* Deduct exit latency */
		if (measured_ns > 2 * target->exit_latency_ns)
			measured_ns -= target->exit_latency_ns;
		else
			measured_ns /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_ns > data->next_timer_ns)
		measured_ns = data->next_timer_ns;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
		new_factor += div64_u64(RESOLUTION * measured_ns,
					data->next_timer_ns);
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	menu_update_intervals(data, ktime_to_us(measured_ns));
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	for(i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);