GitHub Repository: torvalds/linux
Path: blob/master/drivers/cpufreq/cpufreq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <[email protected]>
 * (C) 2013 Viresh Kumar <[email protected]>
 *
 * Oct 2005 - Ashok Raj <[email protected]>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <[email protected]>
 *	Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active) \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy) \
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy) \
	for_each_suitable_policy(__policy, false)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor) \
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

bool has_target_index(void)
{
	return !!cpufreq_driver->target_index;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);
static bool cpufreq_boost_supported(void);
static int cpufreq_boost_trigger_state(int state);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
EXPORT_SYMBOL_GPL(disable_cpufreq);

static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
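
/*
 * Illustrative example (not part of the kernel source): a sampling governor
 * could use get_cpu_idle_time() to estimate the load on a CPU between two
 * samples. The helper below is only a sketch and its name is hypothetical.
 *
 *	static unsigned int sample_cpu_load(unsigned int cpu,
 *					    u64 *prev_idle, u64 *prev_wall)
 *	{
 *		u64 idle, wall;
 *		unsigned int idle_delta, wall_delta;
 *
 *		idle = get_cpu_idle_time(cpu, &wall, 0);
 *		idle_delta = (unsigned int)(idle - *prev_idle);
 *		wall_delta = (unsigned int)(wall - *prev_wall);
 *		*prev_idle = idle;
 *		*prev_wall = wall;
 *
 *		if (!wall_delta)
 *			return 0;
 *
 *		return 100 * (wall_delta - idle_delta) / wall_delta;
 *	}
 */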

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_policy);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy. Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

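/*
 * Illustrative example (not part of the kernel source): typical balanced use
 * of cpufreq_cpu_get() and cpufreq_cpu_put() by code that only needs to read
 * a policy field briefly. The function name is hypothetical.
 *
 *	static unsigned int read_policy_max(unsigned int cpu)
 *	{
 *		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *		unsigned int max = 0;
 *
 *		if (policy) {
 *			max = policy->max;
 *			cpufreq_cpu_put(policy);
 *		}
 *
 *		return max;
 *	}
 */
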
/*********************************************************************
 *              EXTERNALLY AFFECTING FREQUENCY CHANGES               *
 *********************************************************************/

/**
271
* adjust_jiffies - Adjust the system "loops_per_jiffy".
272
* @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
273
* @ci: Frequency change information.
274
*
275
* This function alters the system "loops_per_jiffy" for the clock
276
* speed change. Note that loops_per_jiffy cannot be updated on SMP
277
* systems as each CPU might be scaled differently. So, use the arch
278
* per-CPU loops_per_jiffy value wherever possible.
279
*/
280
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
281
{
282
#ifndef CONFIG_SMP
283
static unsigned long l_p_j_ref;
284
static unsigned int l_p_j_ref_freq;
285
286
if (ci->flags & CPUFREQ_CONST_LOOPS)
287
return;
288
289
if (!l_p_j_ref_freq) {
290
l_p_j_ref = loops_per_jiffy;
291
l_p_j_ref_freq = ci->old;
292
pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
293
l_p_j_ref, l_p_j_ref_freq);
294
}
295
if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
296
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
297
ci->new);
298
pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
299
loops_per_jiffy, ci->new);
300
}
301
#endif
302
}
303
304
/**
305
* cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
306
* @policy: cpufreq policy the frequency transition applies to.
307
* @freqs: details of the frequency update.
308
* @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
309
*
310
* This function calls the transition notifiers and adjust_jiffies().
311
*
312
* It is called twice on all CPU frequency changes that have external effects.
313
*/
314
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
315
struct cpufreq_freqs *freqs,
316
unsigned int state)
317
{
318
int cpu;
319
320
BUG_ON(irqs_disabled());
321
322
if (cpufreq_disabled())
323
return;
324
325
freqs->policy = policy;
326
freqs->flags = cpufreq_driver->flags;
327
pr_debug("notification %u of frequency transition to %u kHz\n",
328
state, freqs->new);
329
330
switch (state) {
331
case CPUFREQ_PRECHANGE:
332
/*
333
* Detect if the driver reported a value as "old frequency"
334
* which is not equal to what the cpufreq core thinks is
335
* "old frequency".
336
*/
337
if (policy->cur && policy->cur != freqs->old) {
338
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
339
freqs->old, policy->cur);
340
freqs->old = policy->cur;
341
}
342
343
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
344
CPUFREQ_PRECHANGE, freqs);
345
346
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
347
break;
348
349
case CPUFREQ_POSTCHANGE:
350
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
351
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
352
cpumask_pr_args(policy->cpus));
353
354
for_each_cpu(cpu, policy->cpus)
355
trace_cpu_frequency(freqs->new, cpu);
356
357
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
358
CPUFREQ_POSTCHANGE, freqs);
359
360
cpufreq_stats_record_transition(policy, freqs->new);
361
policy->cur = freqs->new;
362
}
363
}
364
365
/* Do post notifications when there are chances that transition has failed */
366
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
367
struct cpufreq_freqs *freqs, int transition_failed)
368
{
369
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
370
if (!transition_failed)
371
return;
372
373
swap(freqs->old, freqs->new);
374
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
375
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
376
}
377
378
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
379
struct cpufreq_freqs *freqs)
380
{
381
382
/*
383
* Catch double invocations of _begin() which lead to self-deadlock.
384
* ASYNC_NOTIFICATION drivers are left out because the cpufreq core
385
* doesn't invoke _begin() on their behalf, and hence the chances of
386
* double invocations are very low. Moreover, there are scenarios
387
* where these checks can emit false-positive warnings in these
388
* drivers; so we avoid that by skipping them altogether.
389
*/
390
WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
391
&& current == policy->transition_task);
392
393
wait:
394
wait_event(policy->transition_wait, !policy->transition_ongoing);
395
396
spin_lock(&policy->transition_lock);
397
398
if (unlikely(policy->transition_ongoing)) {
399
spin_unlock(&policy->transition_lock);
400
goto wait;
401
}
402
403
policy->transition_ongoing = true;
404
policy->transition_task = current;
405
406
spin_unlock(&policy->transition_lock);
407
408
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
409
}
410
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
411
412
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
413
struct cpufreq_freqs *freqs, int transition_failed)
414
{
415
if (WARN_ON(!policy->transition_ongoing))
416
return;
417
418
cpufreq_notify_post_transition(policy, freqs, transition_failed);
419
420
arch_set_freq_scale(policy->related_cpus,
421
policy->cur,
422
arch_scale_freq_ref(policy->cpu));
423
424
spin_lock(&policy->transition_lock);
425
policy->transition_ongoing = false;
426
policy->transition_task = NULL;
427
spin_unlock(&policy->transition_lock);
428
429
wake_up(&policy->transition_wait);
430
}
431
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
432
433
/*
434
* Fast frequency switching status count. Positive means "enabled", negative
435
* means "disabled" and 0 means "not decided yet".
436
*/
437
static int cpufreq_fast_switch_count;
438
static DEFINE_MUTEX(cpufreq_fast_switch_lock);
439
440
static void cpufreq_list_transition_notifiers(void)
441
{
442
struct notifier_block *nb;
443
444
pr_info("Registered transition notifiers:\n");
445
446
mutex_lock(&cpufreq_transition_notifier_list.mutex);
447
448
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
449
pr_info("%pS\n", nb->notifier_call);
450
451
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
452
}
453
454
/**
455
* cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
456
* @policy: cpufreq policy to enable fast frequency switching for.
457
*
458
* Try to enable fast frequency switching for @policy.
459
*
460
* The attempt will fail if there is at least one transition notifier registered
461
* at this point, as fast frequency switching is quite fundamentally at odds
462
* with transition notifiers. Thus if successful, it will make registration of
463
* transition notifiers fail going forward.
464
*/
465
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
466
{
467
lockdep_assert_held(&policy->rwsem);
468
469
if (!policy->fast_switch_possible)
470
return;
471
472
mutex_lock(&cpufreq_fast_switch_lock);
473
if (cpufreq_fast_switch_count >= 0) {
474
cpufreq_fast_switch_count++;
475
policy->fast_switch_enabled = true;
476
} else {
477
pr_warn("CPU%u: Fast frequency switching not enabled\n",
478
policy->cpu);
479
cpufreq_list_transition_notifiers();
480
}
481
mutex_unlock(&cpufreq_fast_switch_lock);
482
}
483
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
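/*
 * Illustrative example (not part of the kernel source): fast switching is
 * only attempted when the driver has declared it possible, typically by
 * setting policy->fast_switch_possible from its ->init() callback and
 * implementing ->fast_switch(); a governor may then call
 * cpufreq_enable_fast_switch() with policy->rwsem held. The driver name below
 * is hypothetical.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		policy->fast_switch_possible = true;
 *		return 0;
 *	}
 */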
484
485
/**
486
* cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
487
* @policy: cpufreq policy to disable fast frequency switching for.
488
*/
489
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
490
{
491
mutex_lock(&cpufreq_fast_switch_lock);
492
if (policy->fast_switch_enabled) {
493
policy->fast_switch_enabled = false;
494
if (!WARN_ON(cpufreq_fast_switch_count <= 0))
495
cpufreq_fast_switch_count--;
496
}
497
mutex_unlock(&cpufreq_fast_switch_lock);
498
}
499
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
500
501
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
502
unsigned int target_freq,
503
unsigned int min, unsigned int max,
504
unsigned int relation)
505
{
506
unsigned int idx;
507
508
target_freq = clamp_val(target_freq, min, max);
509
510
if (!policy->freq_table)
511
return target_freq;
512
513
idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
514
policy->cached_resolved_idx = idx;
515
policy->cached_target_freq = target_freq;
516
return policy->freq_table[idx].frequency;
517
}
518
519
/**
520
* cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
521
* one.
522
* @policy: associated policy to interrogate
523
* @target_freq: target frequency to resolve.
524
*
525
* The target to driver frequency mapping is cached in the policy.
526
*
527
* Return: Lowest driver-supported frequency greater than or equal to the
528
* given target_freq, subject to policy (min/max) and driver limitations.
529
*/
530
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
531
unsigned int target_freq)
532
{
533
unsigned int min = READ_ONCE(policy->min);
534
unsigned int max = READ_ONCE(policy->max);
535
536
/*
537
* If this function runs in parallel with cpufreq_set_policy(), it may
538
* read policy->min before the update and policy->max after the update
539
* or the other way around, so there is no ordering guarantee.
540
*
541
* Resolve this by always honoring the max (in case it comes from
542
* thermal throttling or similar).
543
*/
544
if (unlikely(min > max))
545
min = max;
546
547
return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
548
}
549
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
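/*
 * Illustrative example (not part of the kernel source): a governor that has
 * computed a raw target frequency can map it onto a driver-supported value
 * with cpufreq_driver_resolve_freq() before requesting a switch. The helper
 * below is a sketch with a hypothetical name; scaling cpuinfo.max_freq by a
 * utilization ratio is only for illustration.
 *
 *	static unsigned int pick_next_freq(struct cpufreq_policy *policy,
 *					   unsigned long util, unsigned long max)
 *	{
 *		unsigned int raw = policy->cpuinfo.max_freq * util / max;
 *
 *		return cpufreq_driver_resolve_freq(policy, raw);
 *	}
 */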
550
551
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
552
{
553
unsigned int latency;
554
555
if (policy->transition_delay_us)
556
return policy->transition_delay_us;
557
558
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
559
if (latency)
560
/* Give 50% breathing room between updates */
561
return latency + (latency >> 1);
562
563
return USEC_PER_MSEC;
564
}
565
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
566
567
/*********************************************************************
568
* SYSFS INTERFACE *
569
*********************************************************************/
570
static ssize_t show_boost(struct kobject *kobj,
571
struct kobj_attribute *attr, char *buf)
572
{
573
return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
574
}
575
576
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
577
const char *buf, size_t count)
578
{
579
bool enable;
580
581
if (kstrtobool(buf, &enable))
582
return -EINVAL;
583
584
if (cpufreq_boost_trigger_state(enable)) {
585
pr_err("%s: Cannot %s BOOST!\n",
586
__func__, str_enable_disable(enable));
587
return -EINVAL;
588
}
589
590
pr_debug("%s: cpufreq BOOST %s\n",
591
__func__, str_enabled_disabled(enable));
592
593
return count;
594
}
595
define_one_global_rw(boost);
596
597
static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
598
{
599
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
600
}
601
602
static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
603
{
604
int ret;
605
606
if (policy->boost_enabled == enable)
607
return 0;
608
609
policy->boost_enabled = enable;
610
611
ret = cpufreq_driver->set_boost(policy, enable);
612
if (ret)
613
policy->boost_enabled = !policy->boost_enabled;
614
615
return ret;
616
}
617
618
static ssize_t store_local_boost(struct cpufreq_policy *policy,
619
const char *buf, size_t count)
620
{
621
int ret;
622
bool enable;
623
624
if (kstrtobool(buf, &enable))
625
return -EINVAL;
626
627
if (!cpufreq_driver->boost_enabled)
628
return -EINVAL;
629
630
if (!policy->boost_supported)
631
return -EINVAL;
632
633
ret = policy_set_boost(policy, enable);
634
if (!ret)
635
return count;
636
637
return ret;
638
}
639
640
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
641
642
static struct cpufreq_governor *find_governor(const char *str_governor)
643
{
644
struct cpufreq_governor *t;
645
646
for_each_governor(t)
647
if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
648
return t;
649
650
return NULL;
651
}
652
653
static struct cpufreq_governor *get_governor(const char *str_governor)
654
{
655
struct cpufreq_governor *t;
656
657
mutex_lock(&cpufreq_governor_mutex);
658
t = find_governor(str_governor);
659
if (!t)
660
goto unlock;
661
662
if (!try_module_get(t->owner))
663
t = NULL;
664
665
unlock:
666
mutex_unlock(&cpufreq_governor_mutex);
667
668
return t;
669
}
670
671
static unsigned int cpufreq_parse_policy(char *str_governor)
672
{
673
if (!strncasecmp(str_governor, "performance", strlen("performance")))
674
return CPUFREQ_POLICY_PERFORMANCE;
675
676
if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
677
return CPUFREQ_POLICY_POWERSAVE;
678
679
return CPUFREQ_POLICY_UNKNOWN;
680
}
681
682
/**
683
* cpufreq_parse_governor - parse a governor string only for has_target()
684
* @str_governor: Governor name.
685
*/
686
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
687
{
688
struct cpufreq_governor *t;
689
690
t = get_governor(str_governor);
691
if (t)
692
return t;
693
694
if (request_module("cpufreq_%s", str_governor))
695
return NULL;
696
697
return get_governor(str_governor);
698
}
699
700
/*
701
* cpufreq_per_cpu_attr_read() / show_##file_name() -
702
* print out cpufreq information
703
*
704
* Write out information from cpufreq_driver->policy[cpu]; object must be
705
* "unsigned int".
706
*/
707
708
#define show_one(file_name, object) \
709
static ssize_t show_##file_name \
710
(struct cpufreq_policy *policy, char *buf) \
711
{ \
712
return sysfs_emit(buf, "%u\n", policy->object); \
713
}
714
715
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
716
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
717
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
718
show_one(scaling_min_freq, min);
719
show_one(scaling_max_freq, max);
720
721
__weak int arch_freq_get_on_cpu(int cpu)
722
{
723
return -EOPNOTSUPP;
724
}
725
726
static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
727
{
728
return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
729
}
730
731
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
732
{
733
ssize_t ret;
734
int freq;
735
736
freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
737
? arch_freq_get_on_cpu(policy->cpu)
738
: 0;
739
740
if (freq > 0)
741
ret = sysfs_emit(buf, "%u\n", freq);
742
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
743
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
744
else
745
ret = sysfs_emit(buf, "%u\n", policy->cur);
746
return ret;
747
}
748
749
/*
750
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
751
*/
752
#define store_one(file_name, object) \
753
static ssize_t store_##file_name \
754
(struct cpufreq_policy *policy, const char *buf, size_t count) \
755
{ \
756
unsigned long val; \
757
int ret; \
758
\
759
ret = kstrtoul(buf, 0, &val); \
760
if (ret) \
761
return ret; \
762
\
763
ret = freq_qos_update_request(policy->object##_freq_req, val);\
764
return ret >= 0 ? count : ret; \
765
}
766
767
store_one(scaling_min_freq, min);
768
store_one(scaling_max_freq, max);
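/*
 * Illustrative expansion (not part of the kernel source): for reference,
 * show_one(scaling_min_freq, min) above generates roughly:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sysfs_emit(buf, "%u\n", policy->min);
 *	}
 *
 * and store_one(scaling_min_freq, min) generates a store_scaling_min_freq()
 * that parses the written value and forwards it to
 * freq_qos_update_request(policy->min_freq_req, val).
 */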
769
770
/*
771
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
772
*/
773
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
774
char *buf)
775
{
776
unsigned int cur_freq = __cpufreq_get(policy);
777
778
if (cur_freq)
779
return sysfs_emit(buf, "%u\n", cur_freq);
780
781
return sysfs_emit(buf, "<unknown>\n");
782
}
783
784
/*
785
* show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
786
*/
787
static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
788
char *buf)
789
{
790
int avg_freq = arch_freq_get_on_cpu(policy->cpu);
791
792
if (avg_freq > 0)
793
return sysfs_emit(buf, "%u\n", avg_freq);
794
return avg_freq != 0 ? avg_freq : -EINVAL;
795
}
796
797
/*
798
* show_scaling_governor - show the current policy for the specified CPU
799
*/
800
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
801
{
802
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
803
return sysfs_emit(buf, "powersave\n");
804
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
805
return sysfs_emit(buf, "performance\n");
806
else if (policy->governor)
807
return sysfs_emit(buf, "%s\n", policy->governor->name);
808
return -EINVAL;
809
}
810
811
/*
812
* store_scaling_governor - store policy for the specified CPU
813
*/
814
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
815
const char *buf, size_t count)
816
{
817
char str_governor[CPUFREQ_NAME_LEN];
818
int ret;
819
820
ret = sscanf(buf, "%15s", str_governor);
821
if (ret != 1)
822
return -EINVAL;
823
824
if (cpufreq_driver->setpolicy) {
825
unsigned int new_pol;
826
827
new_pol = cpufreq_parse_policy(str_governor);
828
if (!new_pol)
829
return -EINVAL;
830
831
ret = cpufreq_set_policy(policy, NULL, new_pol);
832
} else {
833
struct cpufreq_governor *new_gov;
834
835
new_gov = cpufreq_parse_governor(str_governor);
836
if (!new_gov)
837
return -EINVAL;
838
839
ret = cpufreq_set_policy(policy, new_gov,
840
CPUFREQ_POLICY_UNKNOWN);
841
842
module_put(new_gov->owner);
843
}
844
845
return ret ? ret : count;
846
}
847
848
/*
849
* show_scaling_driver - show the cpufreq driver currently loaded
850
*/
851
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
852
{
853
return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
854
}
855
856
/*
857
* show_scaling_available_governors - show the available CPUfreq governors
858
*/
859
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
860
char *buf)
861
{
862
ssize_t i = 0;
863
struct cpufreq_governor *t;
864
865
if (!has_target()) {
866
i += sysfs_emit(buf, "performance powersave");
867
goto out;
868
}
869
870
mutex_lock(&cpufreq_governor_mutex);
871
for_each_governor(t) {
872
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
873
- (CPUFREQ_NAME_LEN + 2)))
874
break;
875
i += sysfs_emit_at(buf, i, "%s ", t->name);
876
}
877
mutex_unlock(&cpufreq_governor_mutex);
878
out:
879
i += sysfs_emit_at(buf, i, "\n");
880
return i;
881
}
882
883
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
884
{
885
ssize_t i = 0;
886
unsigned int cpu;
887
888
for_each_cpu(cpu, mask) {
889
i += sysfs_emit_at(buf, i, "%u ", cpu);
890
if (i >= (PAGE_SIZE - 5))
891
break;
892
}
893
894
/* Remove the extra space at the end */
895
i--;
896
897
i += sysfs_emit_at(buf, i, "\n");
898
return i;
899
}
900
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
901
902
/*
903
* show_related_cpus - show the CPUs affected by each transition even if
904
* hw coordination is in use
905
*/
906
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
907
{
908
return cpufreq_show_cpus(policy->related_cpus, buf);
909
}
910
911
/*
912
* show_affected_cpus - show the CPUs affected by each transition
913
*/
914
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
915
{
916
return cpufreq_show_cpus(policy->cpus, buf);
917
}
918
919
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
920
const char *buf, size_t count)
921
{
922
unsigned int freq = 0;
923
int ret;
924
925
if (!policy->governor || !policy->governor->store_setspeed)
926
return -EINVAL;
927
928
ret = kstrtouint(buf, 0, &freq);
929
if (ret)
930
return ret;
931
932
policy->governor->store_setspeed(policy, freq);
933
934
return count;
935
}
936
937
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
938
{
939
if (!policy->governor || !policy->governor->show_setspeed)
940
return sysfs_emit(buf, "<unsupported>\n");
941
942
return policy->governor->show_setspeed(policy, buf);
943
}
944
945
/*
946
* show_bios_limit - show the current cpufreq HW/BIOS limitation
947
*/
948
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
949
{
950
unsigned int limit;
951
int ret;
952
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
953
if (!ret)
954
return sysfs_emit(buf, "%u\n", limit);
955
return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
956
}
957
958
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
959
cpufreq_freq_attr_ro(cpuinfo_avg_freq);
960
cpufreq_freq_attr_ro(cpuinfo_min_freq);
961
cpufreq_freq_attr_ro(cpuinfo_max_freq);
962
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
963
cpufreq_freq_attr_ro(scaling_available_governors);
964
cpufreq_freq_attr_ro(scaling_driver);
965
cpufreq_freq_attr_ro(scaling_cur_freq);
966
cpufreq_freq_attr_ro(bios_limit);
967
cpufreq_freq_attr_ro(related_cpus);
968
cpufreq_freq_attr_ro(affected_cpus);
969
cpufreq_freq_attr_rw(scaling_min_freq);
970
cpufreq_freq_attr_rw(scaling_max_freq);
971
cpufreq_freq_attr_rw(scaling_governor);
972
cpufreq_freq_attr_rw(scaling_setspeed);
973
974
static struct attribute *cpufreq_attrs[] = {
975
&cpuinfo_min_freq.attr,
976
&cpuinfo_max_freq.attr,
977
&cpuinfo_transition_latency.attr,
978
&scaling_cur_freq.attr,
979
&scaling_min_freq.attr,
980
&scaling_max_freq.attr,
981
&affected_cpus.attr,
982
&related_cpus.attr,
983
&scaling_governor.attr,
984
&scaling_driver.attr,
985
&scaling_available_governors.attr,
986
&scaling_setspeed.attr,
987
NULL
988
};
989
ATTRIBUTE_GROUPS(cpufreq);
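/*
 * For reference (not part of the kernel source): the attributes above are
 * exposed per policy under sysfs, typically at
 *
 *	/sys/devices/system/cpu/cpufreq/policy<N>/scaling_governor
 *	/sys/devices/system/cpu/cpufreq/policy<N>/scaling_min_freq
 *	/sys/devices/system/cpu/cpufreq/policy<N>/scaling_max_freq
 *
 * with a per-CPU symlink /sys/devices/system/cpu/cpu<N>/cpufreq pointing at
 * the owning policy directory.
 */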
990
991
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
992
#define to_attr(a) container_of(a, struct freq_attr, attr)
993
994
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
995
{
996
struct cpufreq_policy *policy = to_policy(kobj);
997
struct freq_attr *fattr = to_attr(attr);
998
999
if (!fattr->show)
1000
return -EIO;
1001
1002
guard(cpufreq_policy_read)(policy);
1003
1004
if (likely(!policy_is_inactive(policy)))
1005
return fattr->show(policy, buf);
1006
1007
return -EBUSY;
1008
}
1009
1010
static ssize_t store(struct kobject *kobj, struct attribute *attr,
1011
const char *buf, size_t count)
1012
{
1013
struct cpufreq_policy *policy = to_policy(kobj);
1014
struct freq_attr *fattr = to_attr(attr);
1015
1016
if (!fattr->store)
1017
return -EIO;
1018
1019
guard(cpufreq_policy_write)(policy);
1020
1021
if (likely(!policy_is_inactive(policy)))
1022
return fattr->store(policy, buf, count);
1023
1024
return -EBUSY;
1025
}
1026
1027
static void cpufreq_sysfs_release(struct kobject *kobj)
1028
{
1029
struct cpufreq_policy *policy = to_policy(kobj);
1030
pr_debug("last reference is dropped\n");
1031
complete(&policy->kobj_unregister);
1032
}
1033
1034
static const struct sysfs_ops sysfs_ops = {
1035
.show = show,
1036
.store = store,
1037
};
1038
1039
static const struct kobj_type ktype_cpufreq = {
1040
.sysfs_ops = &sysfs_ops,
1041
.default_groups = cpufreq_groups,
1042
.release = cpufreq_sysfs_release,
1043
};
1044
1045
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1046
struct device *dev)
1047
{
1048
if (unlikely(!dev))
1049
return;
1050
1051
if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1052
return;
1053
1054
dev_dbg(dev, "%s: Adding symlink\n", __func__);
1055
if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1056
dev_err(dev, "cpufreq symlink creation failed\n");
1057
}
1058
1059
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1060
struct device *dev)
1061
{
1062
dev_dbg(dev, "%s: Removing symlink\n", __func__);
1063
sysfs_remove_link(&dev->kobj, "cpufreq");
1064
cpumask_clear_cpu(cpu, policy->real_cpus);
1065
}
1066
1067
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1068
{
1069
struct freq_attr **drv_attr;
1070
int ret = 0;
1071
1072
/* Attributes that need freq_table */
1073
if (policy->freq_table) {
1074
ret = sysfs_create_file(&policy->kobj,
1075
&cpufreq_freq_attr_scaling_available_freqs.attr);
1076
if (ret)
1077
return ret;
1078
1079
if (cpufreq_boost_supported()) {
1080
ret = sysfs_create_file(&policy->kobj,
1081
&cpufreq_freq_attr_scaling_boost_freqs.attr);
1082
if (ret)
1083
return ret;
1084
}
1085
}
1086
1087
/* set up files for this cpu device */
1088
drv_attr = cpufreq_driver->attr;
1089
while (drv_attr && *drv_attr) {
1090
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1091
if (ret)
1092
return ret;
1093
drv_attr++;
1094
}
1095
if (cpufreq_driver->get) {
1096
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1097
if (ret)
1098
return ret;
1099
}
1100
1101
if (cpufreq_avg_freq_supported(policy)) {
1102
ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
1103
if (ret)
1104
return ret;
1105
}
1106
1107
if (cpufreq_driver->bios_limit) {
1108
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1109
if (ret)
1110
return ret;
1111
}
1112
1113
if (cpufreq_boost_supported()) {
1114
ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
1115
if (ret)
1116
return ret;
1117
}
1118
1119
return 0;
1120
}
1121
1122
static int cpufreq_init_policy(struct cpufreq_policy *policy)
1123
{
1124
struct cpufreq_governor *gov = NULL;
1125
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1126
int ret;
1127
1128
if (has_target()) {
1129
/* Update policy governor to the one used before hotplug. */
1130
if (policy->last_governor[0] != '\0')
1131
gov = get_governor(policy->last_governor);
1132
if (gov) {
1133
pr_debug("Restoring governor %s for cpu %d\n",
1134
gov->name, policy->cpu);
1135
} else {
1136
gov = get_governor(default_governor);
1137
}
1138
1139
if (!gov) {
1140
gov = cpufreq_default_governor();
1141
__module_get(gov->owner);
1142
}
1143
1144
} else {
1145
1146
/* Use the default policy if there is no last_policy. */
1147
if (policy->last_policy) {
1148
pol = policy->last_policy;
1149
} else {
1150
pol = cpufreq_parse_policy(default_governor);
1151
/*
1152
* In case the default governor is neither "performance"
1153
* nor "powersave", fall back to the initial policy
1154
* value set by the driver.
1155
*/
1156
if (pol == CPUFREQ_POLICY_UNKNOWN)
1157
pol = policy->policy;
1158
}
1159
if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1160
pol != CPUFREQ_POLICY_POWERSAVE)
1161
return -ENODATA;
1162
}
1163
1164
ret = cpufreq_set_policy(policy, gov, pol);
1165
if (gov)
1166
module_put(gov->owner);
1167
1168
return ret;
1169
}
1170
1171
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1172
{
1173
int ret = 0;
1174
1175
/* Has this CPU been taken care of already? */
1176
if (cpumask_test_cpu(cpu, policy->cpus))
1177
return 0;
1178
1179
guard(cpufreq_policy_write)(policy);
1180
1181
if (has_target())
1182
cpufreq_stop_governor(policy);
1183
1184
cpumask_set_cpu(cpu, policy->cpus);
1185
1186
if (has_target()) {
1187
ret = cpufreq_start_governor(policy);
1188
if (ret)
1189
pr_err("%s: Failed to start governor\n", __func__);
1190
}
1191
1192
return ret;
1193
}
1194
1195
void refresh_frequency_limits(struct cpufreq_policy *policy)
1196
{
1197
if (!policy_is_inactive(policy)) {
1198
pr_debug("updating policy for CPU %u\n", policy->cpu);
1199
1200
cpufreq_set_policy(policy, policy->governor, policy->policy);
1201
}
1202
}
1203
EXPORT_SYMBOL(refresh_frequency_limits);
1204
1205
static void handle_update(struct work_struct *work)
1206
{
1207
struct cpufreq_policy *policy =
1208
container_of(work, struct cpufreq_policy, update);
1209
1210
pr_debug("handle_update for cpu %u called\n", policy->cpu);
1211
1212
guard(cpufreq_policy_write)(policy);
1213
1214
refresh_frequency_limits(policy);
1215
}
1216
1217
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1218
void *data)
1219
{
1220
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1221
1222
schedule_work(&policy->update);
1223
return 0;
1224
}
1225
1226
static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1227
void *data)
1228
{
1229
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1230
1231
schedule_work(&policy->update);
1232
return 0;
1233
}
1234
1235
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1236
{
1237
struct kobject *kobj;
1238
struct completion *cmp;
1239
1240
scoped_guard(cpufreq_policy_write, policy) {
1241
cpufreq_stats_free_table(policy);
1242
kobj = &policy->kobj;
1243
cmp = &policy->kobj_unregister;
1244
}
1245
kobject_put(kobj);
1246
1247
/*
1248
* We need to make sure that the underlying kobj is
1249
* actually not referenced anymore by anybody before we
1250
* proceed with unloading.
1251
*/
1252
pr_debug("waiting for dropping of refcount\n");
1253
wait_for_completion(cmp);
1254
pr_debug("wait complete\n");
1255
}
1256
1257
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1258
{
1259
struct cpufreq_policy *policy;
1260
struct device *dev = get_cpu_device(cpu);
1261
int ret;
1262
1263
if (!dev)
1264
return NULL;
1265
1266
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1267
if (!policy)
1268
return NULL;
1269
1270
if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1271
goto err_free_policy;
1272
1273
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1274
goto err_free_cpumask;
1275
1276
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1277
goto err_free_rcpumask;
1278
1279
init_completion(&policy->kobj_unregister);
1280
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1281
cpufreq_global_kobject, "policy%u", cpu);
1282
if (ret) {
1283
dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1284
/*
1285
* The entire policy object will be freed below, but the extra
1286
* memory allocated for the kobject name needs to be freed by
1287
* releasing the kobject.
1288
*/
1289
kobject_put(&policy->kobj);
1290
goto err_free_real_cpus;
1291
}
1292
1293
init_rwsem(&policy->rwsem);
1294
1295
freq_constraints_init(&policy->constraints);
1296
1297
policy->nb_min.notifier_call = cpufreq_notifier_min;
1298
policy->nb_max.notifier_call = cpufreq_notifier_max;
1299
1300
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1301
&policy->nb_min);
1302
if (ret) {
1303
dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
1304
ret, cpu);
1305
goto err_kobj_remove;
1306
}
1307
1308
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1309
&policy->nb_max);
1310
if (ret) {
1311
dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
1312
ret, cpu);
1313
goto err_min_qos_notifier;
1314
}
1315
1316
INIT_LIST_HEAD(&policy->policy_list);
1317
spin_lock_init(&policy->transition_lock);
1318
init_waitqueue_head(&policy->transition_wait);
1319
INIT_WORK(&policy->update, handle_update);
1320
1321
return policy;
1322
1323
err_min_qos_notifier:
1324
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1325
&policy->nb_min);
1326
err_kobj_remove:
1327
cpufreq_policy_put_kobj(policy);
1328
err_free_real_cpus:
1329
free_cpumask_var(policy->real_cpus);
1330
err_free_rcpumask:
1331
free_cpumask_var(policy->related_cpus);
1332
err_free_cpumask:
1333
free_cpumask_var(policy->cpus);
1334
err_free_policy:
1335
kfree(policy);
1336
1337
return NULL;
1338
}
1339
1340
static void cpufreq_policy_free(struct cpufreq_policy *policy)
1341
{
1342
unsigned long flags;
1343
int cpu;
1344
1345
/*
1346
* The callers must ensure the policy is inactive by now, to avoid any
1347
* races with show()/store() callbacks.
1348
*/
1349
if (unlikely(!policy_is_inactive(policy)))
1350
pr_warn("%s: Freeing active policy\n", __func__);
1351
1352
/* Remove policy from list */
1353
write_lock_irqsave(&cpufreq_driver_lock, flags);
1354
list_del(&policy->policy_list);
1355
1356
for_each_cpu(cpu, policy->related_cpus)
1357
per_cpu(cpufreq_cpu_data, cpu) = NULL;
1358
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1359
1360
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1361
&policy->nb_max);
1362
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1363
&policy->nb_min);
1364
1365
/* Cancel any pending policy->update work before freeing the policy. */
1366
cancel_work_sync(&policy->update);
1367
1368
if (policy->max_freq_req) {
1369
/*
1370
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1371
* notification, since CPUFREQ_CREATE_POLICY notification was
1372
* sent after adding max_freq_req earlier.
1373
*/
1374
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1375
CPUFREQ_REMOVE_POLICY, policy);
1376
freq_qos_remove_request(policy->max_freq_req);
1377
}
1378
1379
freq_qos_remove_request(policy->min_freq_req);
1380
kfree(policy->min_freq_req);
1381
1382
cpufreq_policy_put_kobj(policy);
1383
free_cpumask_var(policy->real_cpus);
1384
free_cpumask_var(policy->related_cpus);
1385
free_cpumask_var(policy->cpus);
1386
kfree(policy);
1387
}
1388
1389
static int cpufreq_policy_online(struct cpufreq_policy *policy,
1390
unsigned int cpu, bool new_policy)
1391
{
1392
unsigned long flags;
1393
unsigned int j;
1394
int ret;
1395
1396
guard(cpufreq_policy_write)(policy);
1397
1398
policy->cpu = cpu;
1399
policy->governor = NULL;
1400
1401
if (!new_policy && cpufreq_driver->online) {
1402
/* Recover policy->cpus using related_cpus */
1403
cpumask_copy(policy->cpus, policy->related_cpus);
1404
1405
ret = cpufreq_driver->online(policy);
1406
if (ret) {
1407
pr_debug("%s: %d: initialization failed\n", __func__,
1408
__LINE__);
1409
goto out_exit_policy;
1410
}
1411
} else {
1412
cpumask_copy(policy->cpus, cpumask_of(cpu));
1413
1414
/*
1415
* Call driver. From then on the cpufreq must be able
1416
* to accept all calls to ->verify and ->setpolicy for this CPU.
1417
*/
1418
ret = cpufreq_driver->init(policy);
1419
if (ret) {
1420
pr_debug("%s: %d: initialization failed\n", __func__,
1421
__LINE__);
1422
goto out_clear_policy;
1423
}
1424
1425
/*
1426
* The initialization has succeeded and the policy is online.
1427
* If there is a problem with its frequency table, take it
1428
* offline and drop it.
1429
*/
1430
if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
1431
policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
1432
ret = cpufreq_table_validate_and_sort(policy);
1433
if (ret)
1434
goto out_offline_policy;
1435
}
1436
1437
/* related_cpus should at least include policy->cpus. */
1438
cpumask_copy(policy->related_cpus, policy->cpus);
1439
}
1440
1441
/*
1442
* Affected CPUs must always be ones that are online. We aren't
1443
* managing offline CPUs here.
1444
*/
1445
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1446
1447
if (new_policy) {
1448
for_each_cpu(j, policy->related_cpus) {
1449
per_cpu(cpufreq_cpu_data, j) = policy;
1450
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1451
}
1452
1453
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1454
GFP_KERNEL);
1455
if (!policy->min_freq_req) {
1456
ret = -ENOMEM;
1457
goto out_destroy_policy;
1458
}
1459
1460
ret = freq_qos_add_request(&policy->constraints,
1461
policy->min_freq_req, FREQ_QOS_MIN,
1462
FREQ_QOS_MIN_DEFAULT_VALUE);
1463
if (ret < 0) {
1464
/*
1465
* So we don't call freq_qos_remove_request() for an
1466
* uninitialized request.
1467
*/
1468
kfree(policy->min_freq_req);
1469
policy->min_freq_req = NULL;
1470
goto out_destroy_policy;
1471
}
1472
1473
/*
1474
* This must be initialized right here to avoid calling
1475
* freq_qos_remove_request() on uninitialized request in case
1476
* of errors.
1477
*/
1478
policy->max_freq_req = policy->min_freq_req + 1;
1479
1480
ret = freq_qos_add_request(&policy->constraints,
1481
policy->max_freq_req, FREQ_QOS_MAX,
1482
FREQ_QOS_MAX_DEFAULT_VALUE);
1483
if (ret < 0) {
1484
policy->max_freq_req = NULL;
1485
goto out_destroy_policy;
1486
}
1487
1488
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1489
CPUFREQ_CREATE_POLICY, policy);
1490
} else {
1491
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
1492
if (ret < 0)
1493
goto out_destroy_policy;
1494
}
1495
1496
if (cpufreq_driver->get && has_target()) {
1497
policy->cur = cpufreq_driver->get(policy->cpu);
1498
if (!policy->cur) {
1499
ret = -EIO;
1500
pr_err("%s: ->get() failed\n", __func__);
1501
goto out_destroy_policy;
1502
}
1503
}
1504
1505
/*
1506
* Sometimes boot loaders set CPU frequency to a value outside of
1507
* frequency table present with cpufreq core. In such cases CPU might be
1508
* unstable if it has to run on that frequency for long duration of time
1509
* and so it's better to set it to a frequency which is specified in
1510
* freq-table. This also makes cpufreq stats inconsistent as
1511
* cpufreq-stats would fail to register because current frequency of CPU
1512
* isn't found in freq-table.
1513
*
1514
* Because we don't want this change to affect the boot process badly, we go
1515
* for the next freq which is >= policy->cur ('cur' must be set by now,
1516
* otherwise we will end up setting freq to lowest of the table as 'cur'
1517
* is initialized to zero).
1518
*
1519
* We are passing target-freq as "policy->cur - 1" otherwise
1520
* __cpufreq_driver_target() would simply fail, as policy->cur will be
1521
* equal to target-freq.
1522
*/
1523
if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1524
&& has_target()) {
1525
unsigned int old_freq = policy->cur;
1526
1527
/* Are we running at unknown frequency ? */
1528
ret = cpufreq_frequency_table_get_index(policy, old_freq);
1529
if (ret == -EINVAL) {
1530
ret = __cpufreq_driver_target(policy, old_freq - 1,
1531
CPUFREQ_RELATION_L);
1532
1533
/*
1534
* Reaching here after boot in a few seconds may not
1535
* mean that system will remain stable at "unknown"
1536
* frequency for longer duration. Hence, a BUG_ON().
1537
*/
1538
BUG_ON(ret);
1539
pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
1540
__func__, policy->cpu, old_freq, policy->cur);
1541
}
1542
}
1543
1544
if (new_policy) {
1545
ret = cpufreq_add_dev_interface(policy);
1546
if (ret)
1547
goto out_destroy_policy;
1548
1549
cpufreq_stats_create_table(policy);
1550
1551
write_lock_irqsave(&cpufreq_driver_lock, flags);
1552
list_add(&policy->policy_list, &cpufreq_policy_list);
1553
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1554
1555
/*
1556
* Register with the energy model before
1557
* em_rebuild_sched_domains() is called, which will result
1558
* in rebuilding of the sched domains, which should only be done
1559
* once the energy model is properly initialized for the policy
1560
* first.
1561
*
1562
* Also, this should be called before the policy is registered
1563
* with cooling framework.
1564
*/
1565
if (cpufreq_driver->register_em)
1566
cpufreq_driver->register_em(policy);
1567
}
1568
1569
ret = cpufreq_init_policy(policy);
1570
if (ret) {
1571
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1572
__func__, cpu, ret);
1573
goto out_destroy_policy;
1574
}
1575
1576
return 0;
1577
1578
out_destroy_policy:
1579
for_each_cpu(j, policy->real_cpus)
1580
remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1581
1582
out_offline_policy:
1583
if (cpufreq_driver->offline)
1584
cpufreq_driver->offline(policy);
1585
1586
out_exit_policy:
1587
if (cpufreq_driver->exit)
1588
cpufreq_driver->exit(policy);
1589
1590
out_clear_policy:
1591
cpumask_clear(policy->cpus);
1592
1593
return ret;
1594
}
1595
1596
static int cpufreq_online(unsigned int cpu)
1597
{
1598
struct cpufreq_policy *policy;
1599
bool new_policy;
1600
int ret;
1601
1602
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1603
1604
/* Check if this CPU already has a policy to manage it */
1605
policy = per_cpu(cpufreq_cpu_data, cpu);
1606
if (policy) {
1607
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1608
if (!policy_is_inactive(policy))
1609
return cpufreq_add_policy_cpu(policy, cpu);
1610
1611
/* This is the only online CPU for the policy. Start over. */
1612
new_policy = false;
1613
} else {
1614
new_policy = true;
1615
policy = cpufreq_policy_alloc(cpu);
1616
if (!policy)
1617
return -ENOMEM;
1618
}
1619
1620
ret = cpufreq_policy_online(policy, cpu, new_policy);
1621
if (ret) {
1622
cpufreq_policy_free(policy);
1623
return ret;
1624
}
1625
1626
kobject_uevent(&policy->kobj, KOBJ_ADD);
1627
1628
/* Callback for handling stuff after policy is ready */
1629
if (cpufreq_driver->ready)
1630
cpufreq_driver->ready(policy);
1631
1632
/* Register cpufreq cooling only for a new policy */
1633
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
1634
policy->cdev = of_cpufreq_cooling_register(policy);
1635
1636
/*
1637
* Let the per-policy boost flag mirror the cpufreq_driver boost during
1638
* initialization for a new policy. For an existing policy, maintain the
1639
* previous boost value unless global boost is disabled.
1640
*/
1641
if (cpufreq_driver->set_boost && policy->boost_supported &&
1642
(new_policy || !cpufreq_boost_enabled())) {
1643
ret = policy_set_boost(policy, cpufreq_boost_enabled());
1644
if (ret) {
1645
/* If the set_boost fails, the online operation is not affected */
1646
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
1647
str_enable_disable(cpufreq_boost_enabled()));
1648
}
1649
}
1650
1651
pr_debug("initialization complete\n");
1652
1653
return 0;
1654
}
1655
1656
/**
1657
* cpufreq_add_dev - the cpufreq interface for a CPU device.
1658
* @dev: CPU device.
1659
* @sif: Subsystem interface structure pointer (not used)
1660
*/
1661
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1662
{
1663
struct cpufreq_policy *policy;
1664
unsigned cpu = dev->id;
1665
int ret;
1666
1667
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1668
1669
if (cpu_online(cpu)) {
1670
ret = cpufreq_online(cpu);
1671
if (ret)
1672
return ret;
1673
}
1674
1675
/* Create sysfs link on CPU registration */
1676
policy = per_cpu(cpufreq_cpu_data, cpu);
1677
if (policy)
1678
add_cpu_dev_symlink(policy, cpu, dev);
1679
1680
return 0;
1681
}
1682
1683
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1684
{
1685
int ret;
1686
1687
if (has_target())
1688
cpufreq_stop_governor(policy);
1689
1690
cpumask_clear_cpu(cpu, policy->cpus);
1691
1692
if (!policy_is_inactive(policy)) {
1693
/* Nominate a new CPU if necessary. */
1694
if (cpu == policy->cpu)
1695
policy->cpu = cpumask_any(policy->cpus);
1696
1697
/* Start the governor again for the active policy. */
1698
if (has_target()) {
1699
ret = cpufreq_start_governor(policy);
1700
if (ret)
1701
pr_err("%s: Failed to start governor\n", __func__);
1702
}
1703
1704
return;
1705
}
1706
1707
if (has_target()) {
1708
strscpy(policy->last_governor, policy->governor->name,
1709
CPUFREQ_NAME_LEN);
1710
cpufreq_exit_governor(policy);
1711
} else {
1712
policy->last_policy = policy->policy;
1713
}
1714
1715
/*
1716
* Perform the ->offline() during light-weight tear-down, as
1717
* that allows fast recovery when the CPU comes back.
1718
*/
1719
if (cpufreq_driver->offline) {
1720
cpufreq_driver->offline(policy);
1721
return;
1722
}
1723
1724
if (cpufreq_driver->exit)
1725
cpufreq_driver->exit(policy);
1726
1727
policy->freq_table = NULL;
1728
}
1729
1730
static int cpufreq_offline(unsigned int cpu)
1731
{
1732
struct cpufreq_policy *policy;
1733
1734
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1735
1736
policy = cpufreq_cpu_get_raw(cpu);
1737
if (!policy) {
1738
pr_debug("%s: No cpu_data found\n", __func__);
1739
return 0;
1740
}
1741
1742
guard(cpufreq_policy_write)(policy);
1743
1744
__cpufreq_offline(cpu, policy);
1745
1746
return 0;
1747
}
1748
1749
/*
1750
* cpufreq_remove_dev - remove a CPU device
1751
*
1752
* Removes the cpufreq interface for a CPU device.
1753
*/
1754
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1755
{
1756
unsigned int cpu = dev->id;
1757
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1758
1759
if (!policy)
1760
return;
1761
1762
scoped_guard(cpufreq_policy_write, policy) {
1763
if (cpu_online(cpu))
1764
__cpufreq_offline(cpu, policy);
1765
1766
remove_cpu_dev_symlink(policy, cpu, dev);
1767
1768
if (!cpumask_empty(policy->real_cpus))
1769
return;
1770
1771
/*
1772
* Unregister cpufreq cooling once all the CPUs of the policy
1773
* are removed.
1774
*/
1775
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1776
cpufreq_cooling_unregister(policy->cdev);
1777
policy->cdev = NULL;
1778
}
1779
1780
/* We did light-weight exit earlier, do full tear down now */
1781
if (cpufreq_driver->offline && cpufreq_driver->exit)
1782
cpufreq_driver->exit(policy);
1783
}
1784
1785
cpufreq_policy_free(policy);
1786
}
1787
1788
/**
1789
* cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1790
* @policy: Policy managing CPUs.
1791
* @new_freq: New CPU frequency.
1792
*
1793
* Adjust to the current frequency first and clean up later by either calling
1794
* cpufreq_update_policy(), or scheduling handle_update().
1795
*/
1796
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1797
unsigned int new_freq)
1798
{
1799
struct cpufreq_freqs freqs;
1800
1801
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1802
policy->cur, new_freq);
1803
1804
freqs.old = policy->cur;
1805
freqs.new = new_freq;
1806
1807
cpufreq_freq_transition_begin(policy, &freqs);
1808
cpufreq_freq_transition_end(policy, &freqs, 0);
1809
}
1810
1811
static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1812
{
1813
unsigned int new_freq;
1814
1815
if (!cpufreq_driver->get)
1816
return 0;
1817
1818
new_freq = cpufreq_driver->get(policy->cpu);
1819
if (!new_freq)
1820
return 0;
1821
1822
/*
1823
* If fast frequency switching is used with the given policy, the check
1824
* against policy->cur is pointless, so skip it in that case.
1825
*/
1826
if (policy->fast_switch_enabled || !has_target())
1827
return new_freq;
1828
1829
if (policy->cur != new_freq) {
1830
/*
1831
* For some platforms, the frequency returned by hardware may be
1832
* slightly different from what is provided in the frequency
1833
* table, for example hardware may return 499 MHz instead of 500
1834
* MHz. In such cases it is better to avoid getting into
1835
* unnecessary frequency updates.
1836
*/
1837
if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
1838
return policy->cur;
1839
1840
cpufreq_out_of_sync(policy, new_freq);
1841
if (update)
1842
schedule_work(&policy->update);
1843
}
1844
1845
return new_freq;
1846
}
1847
1848
/**
1849
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1850
* @cpu: CPU number
1851
*
1852
* This is the last known freq, without actually getting it from the driver.
1853
* Return value will be same as what is shown in scaling_cur_freq in sysfs.
1854
*/
1855
unsigned int cpufreq_quick_get(unsigned int cpu)
1856
{
1857
unsigned long flags;
1858
1859
read_lock_irqsave(&cpufreq_driver_lock, flags);
1860
1861
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1862
unsigned int ret_freq = cpufreq_driver->get(cpu);
1863
1864
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1865
1866
return ret_freq;
1867
}
1868
1869
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1870
1871
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1872
if (policy)
1873
return policy->cur;
1874
1875
return 0;
1876
}
1877
EXPORT_SYMBOL(cpufreq_quick_get);
1878
1879
/**
1880
* cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1881
* @cpu: CPU number
1882
*
1883
* Just return the max possible frequency for a given CPU.
1884
*/
1885
unsigned int cpufreq_quick_get_max(unsigned int cpu)
1886
{
1887
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1888
if (policy)
1889
return policy->max;
1890
1891
return 0;
1892
}
1893
EXPORT_SYMBOL(cpufreq_quick_get_max);
1894
1895
/**
1896
* cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1897
* @cpu: CPU number
1898
*
1899
* The default return value is the max_freq field of cpuinfo.
1900
*/
1901
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1902
{
1903
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1904
if (policy)
1905
return policy->cpuinfo.max_freq;
1906
1907
return 0;
1908
}
1909
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1910
1911
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1912
{
1913
if (unlikely(policy_is_inactive(policy)))
1914
return 0;
1915
1916
return cpufreq_verify_current_freq(policy, true);
1917
}
1918
1919
/**
1920
* cpufreq_get - get the current CPU frequency (in kHz)
1921
* @cpu: CPU number
1922
*
1923
* Get the current frequency of the given CPU.
1924
*/
1925
unsigned int cpufreq_get(unsigned int cpu)
1926
{
1927
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1928
if (!policy)
1929
return 0;
1930
1931
guard(cpufreq_policy_read)(policy);
1932
1933
return __cpufreq_get(policy);
1934
}
1935
EXPORT_SYMBOL(cpufreq_get);
1936
1937
static struct subsys_interface cpufreq_interface = {
1938
.name = "cpufreq",
1939
.subsys = &cpu_subsys,
1940
.add_dev = cpufreq_add_dev,
1941
.remove_dev = cpufreq_remove_dev,
1942
};
1943
1944
/*
1945
* In case the platform wants a specific frequency to be configured
1946
* during suspend.
1947
*/
1948
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1949
{
1950
int ret;
1951
1952
if (!policy->suspend_freq) {
1953
pr_debug("%s: suspend_freq not defined\n", __func__);
1954
return 0;
1955
}
1956
1957
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1958
policy->suspend_freq);
1959
1960
ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1961
CPUFREQ_RELATION_H);
1962
if (ret)
1963
pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1964
__func__, policy->suspend_freq, ret);
1965
1966
return ret;
1967
}
1968
EXPORT_SYMBOL(cpufreq_generic_suspend);
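/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a driver might wire up cpufreq_generic_suspend(). The demo_* names
 * below are hypothetical placeholders.
 */
#if 0
static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... set up policy->freq_table, clocks, etc. ... */

	/* Frequency to be programmed while entering suspend. */
	policy->suspend_freq = policy->cpuinfo.max_freq;

	return 0;
}

static struct cpufreq_driver demo_driver = {
	.name		= "demo",
	.init		= demo_cpufreq_init,
	.suspend	= cpufreq_generic_suspend,
	/* ... .verify, .target_index, .get, etc. ... */
};
#endif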
1969
1970
/**
1971
* cpufreq_suspend() - Suspend CPUFreq governors.
1972
*
1973
* Called during system-wide suspend/hibernate cycles to suspend governors,
1974
* because some platforms can't change the frequency after this point in the
1975
* suspend cycle: some of the devices (i2c, regulators, etc.) they use for
1976
* changing the frequency are suspended shortly afterwards.
1977
*/
1978
void cpufreq_suspend(void)
1979
{
1980
struct cpufreq_policy *policy;
1981
1982
if (!cpufreq_driver)
1983
return;
1984
1985
if (!has_target() && !cpufreq_driver->suspend)
1986
goto suspend;
1987
1988
pr_debug("%s: Suspending Governors\n", __func__);
1989
1990
for_each_active_policy(policy) {
1991
if (has_target()) {
1992
scoped_guard(cpufreq_policy_write, policy) {
1993
cpufreq_stop_governor(policy);
1994
}
1995
}
1996
1997
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1998
pr_err("%s: Failed to suspend driver: %s\n", __func__,
1999
cpufreq_driver->name);
2000
}
2001
2002
suspend:
2003
cpufreq_suspended = true;
2004
}
2005
2006
/**
2007
* cpufreq_resume() - Resume CPUFreq governors.
2008
*
2009
* Called during system-wide suspend/hibernate cycles to resume governors that
2010
* were suspended by cpufreq_suspend().
2011
*/
2012
void cpufreq_resume(void)
2013
{
2014
struct cpufreq_policy *policy;
2015
int ret;
2016
2017
if (!cpufreq_driver)
2018
return;
2019
2020
if (unlikely(!cpufreq_suspended))
2021
return;
2022
2023
cpufreq_suspended = false;
2024
2025
if (!has_target() && !cpufreq_driver->resume)
2026
return;
2027
2028
pr_debug("%s: Resuming Governors\n", __func__);
2029
2030
for_each_active_policy(policy) {
2031
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
2032
pr_err("%s: Failed to resume driver: %s\n", __func__,
2033
cpufreq_driver->name);
2034
} else if (has_target()) {
2035
scoped_guard(cpufreq_policy_write, policy) {
2036
ret = cpufreq_start_governor(policy);
2037
}
2038
2039
if (ret)
2040
pr_err("%s: Failed to start governor for CPU%u's policy\n",
2041
__func__, policy->cpu);
2042
}
2043
}
2044
}
2045
2046
/**
2047
* cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
2048
* @flags: Flags to test against the current cpufreq driver's flags.
2049
*
2050
* Assumes that the driver is there, so callers must ensure that this is the
2051
* case.
2052
*/
2053
bool cpufreq_driver_test_flags(u16 flags)
2054
{
2055
return !!(cpufreq_driver->flags & flags);
2056
}
2057
2058
/**
2059
* cpufreq_get_current_driver - Return the current driver's name.
2060
*
2061
* Return the name string of the currently registered cpufreq driver or NULL if
2062
* none.
2063
*/
2064
const char *cpufreq_get_current_driver(void)
2065
{
2066
if (cpufreq_driver)
2067
return cpufreq_driver->name;
2068
2069
return NULL;
2070
}
2071
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
2072
2073
/**
2074
* cpufreq_get_driver_data - Return current driver data.
2075
*
2076
* Return the private data of the currently registered cpufreq driver, or NULL
2077
* if no cpufreq driver has been registered.
2078
*/
2079
void *cpufreq_get_driver_data(void)
2080
{
2081
if (cpufreq_driver)
2082
return cpufreq_driver->driver_data;
2083
2084
return NULL;
2085
}
2086
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
2087
2088
/*********************************************************************
2089
* NOTIFIER LISTS INTERFACE *
2090
*********************************************************************/
2091
2092
/**
2093
* cpufreq_register_notifier - Register a notifier with cpufreq.
2094
* @nb: notifier function to register.
2095
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2096
*
2097
* Add a notifier to one of two lists: either a list of notifiers that run on
2098
* clock rate changes (once before and once after every transition), or a list
2099
* of notifiers that run on cpufreq policy changes.
2100
*
2101
* This function may sleep and it has the same return values as
2102
* blocking_notifier_chain_register().
2103
*/
2104
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2105
{
2106
int ret;
2107
2108
if (cpufreq_disabled())
2109
return -EINVAL;
2110
2111
switch (list) {
2112
case CPUFREQ_TRANSITION_NOTIFIER:
2113
mutex_lock(&cpufreq_fast_switch_lock);
2114
2115
if (cpufreq_fast_switch_count > 0) {
2116
mutex_unlock(&cpufreq_fast_switch_lock);
2117
return -EBUSY;
2118
}
2119
ret = srcu_notifier_chain_register(
2120
&cpufreq_transition_notifier_list, nb);
2121
if (!ret)
2122
cpufreq_fast_switch_count--;
2123
2124
mutex_unlock(&cpufreq_fast_switch_lock);
2125
break;
2126
case CPUFREQ_POLICY_NOTIFIER:
2127
ret = blocking_notifier_chain_register(
2128
&cpufreq_policy_notifier_list, nb);
2129
break;
2130
default:
2131
ret = -EINVAL;
2132
}
2133
2134
return ret;
2135
}
2136
EXPORT_SYMBOL(cpufreq_register_notifier);
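/*
 * Illustrative sketch (editorial addition): registering a transition
 * notifier through this interface. The demo_* names are hypothetical.
 */
#if 0
static int demo_transition_cb(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (event == CPUFREQ_POSTCHANGE)
		pr_info("CPU%u switched from %u to %u kHz\n",
			freqs->policy->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block demo_transition_nb = {
	.notifier_call = demo_transition_cb,
};

static int __init demo_notifier_init(void)
{
	return cpufreq_register_notifier(&demo_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
#endif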
2137
2138
/**
2139
* cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2140
* @nb: notifier block to be unregistered.
2141
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2142
*
2143
* Remove a notifier from one of the cpufreq notifier lists.
2144
*
2145
* This function may sleep and it has the same return values as
2146
* blocking_notifier_chain_unregister().
2147
*/
2148
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2149
{
2150
int ret;
2151
2152
if (cpufreq_disabled())
2153
return -EINVAL;
2154
2155
switch (list) {
2156
case CPUFREQ_TRANSITION_NOTIFIER:
2157
mutex_lock(&cpufreq_fast_switch_lock);
2158
2159
ret = srcu_notifier_chain_unregister(
2160
&cpufreq_transition_notifier_list, nb);
2161
if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2162
cpufreq_fast_switch_count++;
2163
2164
mutex_unlock(&cpufreq_fast_switch_lock);
2165
break;
2166
case CPUFREQ_POLICY_NOTIFIER:
2167
ret = blocking_notifier_chain_unregister(
2168
&cpufreq_policy_notifier_list, nb);
2169
break;
2170
default:
2171
ret = -EINVAL;
2172
}
2173
2174
return ret;
2175
}
2176
EXPORT_SYMBOL(cpufreq_unregister_notifier);
2177
2178
2179
/*********************************************************************
2180
* GOVERNORS *
2181
*********************************************************************/
2182
2183
/**
2184
* cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2185
* @policy: cpufreq policy to switch the frequency for.
2186
* @target_freq: New frequency to set (may be approximate).
2187
*
2188
* Carry out a fast frequency switch without sleeping.
2189
*
2190
* The driver's ->fast_switch() callback invoked by this function must be
2191
* suitable for being called from within RCU-sched read-side critical sections
2192
* and it is expected to select the minimum available frequency greater than or
2193
* equal to @target_freq (CPUFREQ_RELATION_L).
2194
*
2195
* This function must not be called if policy->fast_switch_enabled is unset.
2196
*
2197
* Governors calling this function must guarantee that it will never be invoked
2198
* twice in parallel for the same policy and that it will never be called in
2199
* parallel with either ->target() or ->target_index() for the same policy.
2200
*
2201
* Returns the actual frequency set for the CPU.
2202
*
2203
* If 0 is returned by the driver's ->fast_switch() callback to indicate an
2204
* error condition, the hardware configuration must be preserved.
2205
*/
2206
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2207
unsigned int target_freq)
2208
{
2209
unsigned int freq;
2210
int cpu;
2211
2212
target_freq = clamp_val(target_freq, policy->min, policy->max);
2213
freq = cpufreq_driver->fast_switch(policy, target_freq);
2214
2215
if (!freq)
2216
return 0;
2217
2218
policy->cur = freq;
2219
arch_set_freq_scale(policy->related_cpus, freq,
2220
arch_scale_freq_ref(policy->cpu));
2221
cpufreq_stats_record_transition(policy, freq);
2222
2223
if (trace_cpu_frequency_enabled()) {
2224
for_each_cpu(cpu, policy->cpus)
2225
trace_cpu_frequency(freq, cpu);
2226
}
2227
2228
return freq;
2229
}
2230
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
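/*
 * Illustrative sketch (editorial addition): how a governor's update path
 * might use the fast-switch interface, falling back to the regular target
 * path otherwise. demo_gov_update_freq() is hypothetical; schedutil
 * implements a more elaborate version of this logic.
 */
#if 0
static void demo_gov_update_freq(struct cpufreq_policy *policy,
				 unsigned int next_freq)
{
	if (policy->fast_switch_enabled) {
		/* May run in scheduler context; must not sleep. */
		cpufreq_driver_fast_switch(policy, next_freq);
	} else {
		/* Process context: takes the policy write lock internally. */
		cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
	}
}
#endif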
2231
2232
/**
2233
* cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2234
* @cpu: Target CPU.
2235
* @min_perf: Minimum (required) performance level (units of @capacity).
2236
* @target_perf: Target (desired) performance level (units of @capacity).
2237
* @capacity: Capacity of the target CPU.
2238
*
2239
* Carry out a fast performance level switch of @cpu without sleeping.
2240
*
2241
* The driver's ->adjust_perf() callback invoked by this function must be
2242
* suitable for being called from within RCU-sched read-side critical sections
2243
* and it is expected to select a suitable performance level equal to or above
2244
* @min_perf and preferably equal to or below @target_perf.
2245
*
2246
* This function must not be called if policy->fast_switch_enabled is unset.
2247
*
2248
* Governors calling this function must guarantee that it will never be invoked
2249
* twice in parallel for the same CPU and that it will never be called in
2250
* parallel with either ->target() or ->target_index() or ->fast_switch() for
2251
* the same CPU.
2252
*/
2253
void cpufreq_driver_adjust_perf(unsigned int cpu,
2254
unsigned long min_perf,
2255
unsigned long target_perf,
2256
unsigned long capacity)
2257
{
2258
cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2259
}
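/*
 * Editorial note, illustrative numbers only: with @capacity == 1024,
 * @min_perf == 256 and @target_perf == 512, the driver is asked for a
 * performance level of at least 25% of the maximum and ideally about 50%,
 * mapping those ratios onto its own hardware performance scale.
 */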
2260
2261
/**
2262
* cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2263
*
2264
* Return 'true' if the ->adjust_perf callback is present for the
2265
* current driver or 'false' otherwise.
2266
*/
2267
bool cpufreq_driver_has_adjust_perf(void)
2268
{
2269
return !!cpufreq_driver->adjust_perf;
2270
}
2271
2272
/* Must set freqs->new to intermediate frequency */
2273
static int __target_intermediate(struct cpufreq_policy *policy,
2274
struct cpufreq_freqs *freqs, int index)
2275
{
2276
int ret;
2277
2278
freqs->new = cpufreq_driver->get_intermediate(policy, index);
2279
2280
/* We don't need to switch to intermediate freq */
2281
if (!freqs->new)
2282
return 0;
2283
2284
pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2285
__func__, policy->cpu, freqs->old, freqs->new);
2286
2287
cpufreq_freq_transition_begin(policy, freqs);
2288
ret = cpufreq_driver->target_intermediate(policy, index);
2289
cpufreq_freq_transition_end(policy, freqs, ret);
2290
2291
if (ret)
2292
pr_err("%s: Failed to change to intermediate frequency: %d\n",
2293
__func__, ret);
2294
2295
return ret;
2296
}
2297
2298
static int __target_index(struct cpufreq_policy *policy, int index)
2299
{
2300
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2301
unsigned int restore_freq, intermediate_freq = 0;
2302
unsigned int newfreq = policy->freq_table[index].frequency;
2303
int retval = -EINVAL;
2304
bool notify;
2305
2306
if (newfreq == policy->cur)
2307
return 0;
2308
2309
/* Save last value to restore later on errors */
2310
restore_freq = policy->cur;
2311
2312
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2313
if (notify) {
2314
/* Handle switching to intermediate frequency */
2315
if (cpufreq_driver->get_intermediate) {
2316
retval = __target_intermediate(policy, &freqs, index);
2317
if (retval)
2318
return retval;
2319
2320
intermediate_freq = freqs.new;
2321
/* Set old freq to intermediate */
2322
if (intermediate_freq)
2323
freqs.old = freqs.new;
2324
}
2325
2326
freqs.new = newfreq;
2327
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2328
__func__, policy->cpu, freqs.old, freqs.new);
2329
2330
cpufreq_freq_transition_begin(policy, &freqs);
2331
}
2332
2333
retval = cpufreq_driver->target_index(policy, index);
2334
if (retval)
2335
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2336
retval);
2337
2338
if (notify) {
2339
cpufreq_freq_transition_end(policy, &freqs, retval);
2340
2341
/*
2342
* Failed after setting to intermediate freq? Driver should have
2343
* reverted back to initial frequency and so should we. Check
2344
* here for intermediate_freq instead of get_intermediate, in
2345
* case we haven't switched to intermediate freq at all.
2346
*/
2347
if (unlikely(retval && intermediate_freq)) {
2348
freqs.old = intermediate_freq;
2349
freqs.new = restore_freq;
2350
cpufreq_freq_transition_begin(policy, &freqs);
2351
cpufreq_freq_transition_end(policy, &freqs, 0);
2352
}
2353
}
2354
2355
return retval;
2356
}
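/*
 * Editorial note, illustrative sequence only: on a platform whose driver
 * provides get_intermediate()/target_intermediate(), a switch such as
 * 500 MHz -> 1200 MHz via an 800 MHz intermediate step is notified as
 * begin(500 -> 800), target_intermediate(), end(), then begin(800 -> 1200),
 * target_index(), end(); on failure after the intermediate step the code
 * above notifies a final 800 -> 500 transition to restore the old state.
 */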
2357
2358
int __cpufreq_driver_target(struct cpufreq_policy *policy,
2359
unsigned int target_freq,
2360
unsigned int relation)
2361
{
2362
unsigned int old_target_freq = target_freq;
2363
2364
if (cpufreq_disabled())
2365
return -ENODEV;
2366
2367
target_freq = __resolve_freq(policy, target_freq, policy->min,
2368
policy->max, relation);
2369
2370
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2371
policy->cpu, target_freq, relation, old_target_freq);
2372
2373
/*
2374
* This might look like a redundant call as we are checking it again
2375
* after finding index. But it is left intentionally for cases where
2376
* exactly the same frequency is requested again, so we can save a few
2377
* function calls.
2378
*/
2379
if (target_freq == policy->cur &&
2380
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2381
return 0;
2382
2383
if (cpufreq_driver->target) {
2384
/*
2385
* If the driver hasn't set up a single inefficient frequency,
2386
* it's unlikely it knows how to decode CPUFREQ_RELATION_E.
2387
*/
2388
if (!policy->efficiencies_available)
2389
relation &= ~CPUFREQ_RELATION_E;
2390
2391
return cpufreq_driver->target(policy, target_freq, relation);
2392
}
2393
2394
if (!cpufreq_driver->target_index)
2395
return -EINVAL;
2396
2397
return __target_index(policy, policy->cached_resolved_idx);
2398
}
2399
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2400
2401
int cpufreq_driver_target(struct cpufreq_policy *policy,
2402
unsigned int target_freq,
2403
unsigned int relation)
2404
{
2405
guard(cpufreq_policy_write)(policy);
2406
2407
return __cpufreq_driver_target(policy, target_freq, relation);
2408
}
2409
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2410
2411
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2412
{
2413
return NULL;
2414
}
2415
2416
static int cpufreq_init_governor(struct cpufreq_policy *policy)
2417
{
2418
int ret;
2419
2420
/* Don't start any governor operations if we are entering suspend */
2421
if (cpufreq_suspended)
2422
return 0;
2423
/*
2424
* The governor might not be initialized here if an ACPI _PPC change
2425
* notification happened, so check it.
2426
*/
2427
if (!policy->governor)
2428
return -EINVAL;
2429
2430
/* Platform doesn't want dynamic frequency switching? */
2431
if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2432
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2433
struct cpufreq_governor *gov = cpufreq_fallback_governor();
2434
2435
if (gov) {
2436
pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2437
policy->governor->name, gov->name);
2438
policy->governor = gov;
2439
} else {
2440
return -EINVAL;
2441
}
2442
}
2443
2444
if (!try_module_get(policy->governor->owner))
2445
return -EINVAL;
2446
2447
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2448
2449
if (policy->governor->init) {
2450
ret = policy->governor->init(policy);
2451
if (ret) {
2452
module_put(policy->governor->owner);
2453
return ret;
2454
}
2455
}
2456
2457
policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2458
2459
return 0;
2460
}
2461
2462
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2463
{
2464
if (cpufreq_suspended || !policy->governor)
2465
return;
2466
2467
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2468
2469
if (policy->governor->exit)
2470
policy->governor->exit(policy);
2471
2472
module_put(policy->governor->owner);
2473
}
2474
2475
int cpufreq_start_governor(struct cpufreq_policy *policy)
2476
{
2477
int ret;
2478
2479
if (cpufreq_suspended)
2480
return 0;
2481
2482
if (!policy->governor)
2483
return -EINVAL;
2484
2485
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2486
2487
cpufreq_verify_current_freq(policy, false);
2488
2489
if (policy->governor->start) {
2490
ret = policy->governor->start(policy);
2491
if (ret)
2492
return ret;
2493
}
2494
2495
if (policy->governor->limits)
2496
policy->governor->limits(policy);
2497
2498
return 0;
2499
}
2500
2501
void cpufreq_stop_governor(struct cpufreq_policy *policy)
2502
{
2503
if (cpufreq_suspended || !policy->governor)
2504
return;
2505
2506
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2507
2508
if (policy->governor->stop)
2509
policy->governor->stop(policy);
2510
}
2511
2512
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2513
{
2514
if (cpufreq_suspended || !policy->governor)
2515
return;
2516
2517
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2518
2519
if (policy->governor->limits)
2520
policy->governor->limits(policy);
2521
}
2522
2523
int cpufreq_register_governor(struct cpufreq_governor *governor)
2524
{
2525
int err;
2526
2527
if (!governor)
2528
return -EINVAL;
2529
2530
if (cpufreq_disabled())
2531
return -ENODEV;
2532
2533
mutex_lock(&cpufreq_governor_mutex);
2534
2535
err = -EBUSY;
2536
if (!find_governor(governor->name)) {
2537
err = 0;
2538
list_add(&governor->governor_list, &cpufreq_governor_list);
2539
}
2540
2541
mutex_unlock(&cpufreq_governor_mutex);
2542
return err;
2543
}
2544
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
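/*
 * Illustrative sketch (editorial addition): the usual shape of a governor
 * module built on this interface. The demo_* names are hypothetical; real
 * governors (performance, ondemand, schedutil, ...) follow the same pattern.
 */
#if 0
static void demo_gov_limits(struct cpufreq_policy *policy)
{
	/* React to new policy->min/policy->max limits. */
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor demo_governor = {
	.name	= "demo",
	.owner	= THIS_MODULE,
	.limits	= demo_gov_limits,
	/* Optionally .init, .exit, .start and .stop as well. */
};

static int __init demo_gov_init(void)
{
	return cpufreq_register_governor(&demo_governor);
}

static void __exit demo_gov_exit(void)
{
	cpufreq_unregister_governor(&demo_governor);
}

module_init(demo_gov_init);
module_exit(demo_gov_exit);
#endif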
2545
2546
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2547
{
2548
struct cpufreq_policy *policy;
2549
unsigned long flags;
2550
2551
if (!governor)
2552
return;
2553
2554
if (cpufreq_disabled())
2555
return;
2556
2557
/* clear last_governor for all inactive policies */
2558
read_lock_irqsave(&cpufreq_driver_lock, flags);
2559
for_each_inactive_policy(policy) {
2560
if (!strcmp(policy->last_governor, governor->name)) {
2561
policy->governor = NULL;
2562
policy->last_governor[0] = '\0';
2563
}
2564
}
2565
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2566
2567
mutex_lock(&cpufreq_governor_mutex);
2568
list_del(&governor->governor_list);
2569
mutex_unlock(&cpufreq_governor_mutex);
2570
}
2571
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2572
2573
2574
/*********************************************************************
2575
* POLICY INTERFACE *
2576
*********************************************************************/
2577
2578
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
2579
2580
/**
2581
* cpufreq_update_pressure() - Update cpufreq pressure for CPUs
2582
* @policy: cpufreq policy of the CPUs.
2583
*
2584
* Update the value of cpufreq pressure for all CPUs in the policy.
2585
*/
2586
static void cpufreq_update_pressure(struct cpufreq_policy *policy)
2587
{
2588
unsigned long max_capacity, capped_freq, pressure;
2589
u32 max_freq;
2590
int cpu;
2591
2592
cpu = cpumask_first(policy->related_cpus);
2593
max_freq = arch_scale_freq_ref(cpu);
2594
capped_freq = policy->max;
2595
2596
/*
2597
* Properly handle boost frequencies, which should simply clear
2598
* the cpufreq pressure value.
2599
*/
2600
if (max_freq <= capped_freq) {
2601
pressure = 0;
2602
} else {
2603
max_capacity = arch_scale_cpu_capacity(cpu);
2604
pressure = max_capacity -
2605
mult_frac(max_capacity, capped_freq, max_freq);
2606
}
2607
2608
for_each_cpu(cpu, policy->related_cpus)
2609
WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
2610
}
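/*
 * Editorial note, illustrative numbers only: with arch_scale_cpu_capacity()
 * == 1024, a reference (max) frequency of 2000000 kHz and policy->max capped
 * to 1500000 kHz, the pressure becomes 1024 - (1024 * 1500000 / 2000000) =
 * 1024 - 768 = 256, recorded as the cpufreq pressure of each related CPU.
 */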
2611
2612
/**
2613
* cpufreq_set_policy - Modify cpufreq policy parameters.
2614
* @policy: Policy object to modify.
2615
* @new_gov: Policy governor pointer.
2616
* @new_pol: Policy value (for drivers with built-in governors).
2617
*
2618
* Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2619
* limits to be set for the policy, update @policy with the verified limits
2620
* values and either invoke the driver's ->setpolicy() callback (if present) or
2621
* carry out a governor update for @policy. That is, run the current governor's
2622
* ->limits() callback (if @new_gov points to the same object as the one in
2623
* @policy) or replace the governor for @policy with @new_gov.
2624
*
2625
* The cpuinfo part of @policy is not updated by this function.
2626
*/
2627
static int cpufreq_set_policy(struct cpufreq_policy *policy,
2628
struct cpufreq_governor *new_gov,
2629
unsigned int new_pol)
2630
{
2631
struct cpufreq_policy_data new_data;
2632
struct cpufreq_governor *old_gov;
2633
int ret;
2634
2635
memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2636
new_data.freq_table = policy->freq_table;
2637
new_data.cpu = policy->cpu;
2638
/*
2639
* The PM QoS framework collects all the requests from users and provides us
2640
* the final aggregated value here.
2641
*/
2642
new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2643
new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2644
2645
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2646
new_data.cpu, new_data.min, new_data.max);
2647
2648
/*
2649
* Verify that the CPU speed can be set within these limits and make sure
2650
* that min <= max.
2651
*/
2652
ret = cpufreq_driver->verify(&new_data);
2653
if (ret)
2654
return ret;
2655
2656
/*
2657
* Resolve policy min/max to available frequencies. This ensures that the
2658
* resolved frequency will neither overshoot the requested maximum
2659
* nor undershoot the requested minimum.
2660
*
2661
* Avoid storing intermediate values in policy->max or policy->min and
2662
* compiler optimizations around them because they may be accessed
2663
* concurrently by cpufreq_driver_resolve_freq() during the update.
2664
*/
2665
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
2666
new_data.min, new_data.max,
2667
CPUFREQ_RELATION_H));
2668
new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
2669
new_data.max, CPUFREQ_RELATION_L);
2670
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
2671
2672
trace_cpu_frequency_limits(policy);
2673
2674
cpufreq_update_pressure(policy);
2675
2676
policy->cached_target_freq = UINT_MAX;
2677
2678
pr_debug("new min and max freqs are %u - %u kHz\n",
2679
policy->min, policy->max);
2680
2681
if (cpufreq_driver->setpolicy) {
2682
policy->policy = new_pol;
2683
pr_debug("setting range\n");
2684
return cpufreq_driver->setpolicy(policy);
2685
}
2686
2687
if (new_gov == policy->governor) {
2688
pr_debug("governor limits update\n");
2689
cpufreq_governor_limits(policy);
2690
return 0;
2691
}
2692
2693
pr_debug("governor switch\n");
2694
2695
/* save old, working values */
2696
old_gov = policy->governor;
2697
/* end old governor */
2698
if (old_gov) {
2699
cpufreq_stop_governor(policy);
2700
cpufreq_exit_governor(policy);
2701
}
2702
2703
/* start new governor */
2704
policy->governor = new_gov;
2705
ret = cpufreq_init_governor(policy);
2706
if (!ret) {
2707
ret = cpufreq_start_governor(policy);
2708
if (!ret) {
2709
pr_debug("governor change\n");
2710
return 0;
2711
}
2712
cpufreq_exit_governor(policy);
2713
}
2714
2715
/* new governor failed, so re-start old one */
2716
pr_debug("starting governor %s failed\n", policy->governor->name);
2717
if (old_gov) {
2718
policy->governor = old_gov;
2719
if (cpufreq_init_governor(policy)) {
2720
policy->governor = NULL;
2721
} else if (cpufreq_start_governor(policy)) {
2722
cpufreq_exit_governor(policy);
2723
policy->governor = NULL;
2724
}
2725
}
2726
2727
return ret;
2728
}
2729
2730
static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
2731
{
2732
guard(cpufreq_policy_write)(policy);
2733
2734
/*
2735
* BIOS might change freq behind our back
2736
* -> ask driver for current freq and notify governors about a change
2737
*/
2738
if (cpufreq_driver->get && has_target() &&
2739
(cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2740
return;
2741
2742
refresh_frequency_limits(policy);
2743
}
2744
2745
/**
2746
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2747
* @cpu: CPU to re-evaluate the policy for.
2748
*
2749
* Update the current frequency for the cpufreq policy of @cpu and use
2750
* cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2751
* evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2752
* for the policy in question, among other things.
2753
*/
2754
void cpufreq_update_policy(unsigned int cpu)
2755
{
2756
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
2757
if (!policy)
2758
return;
2759
2760
cpufreq_policy_refresh(policy);
2761
}
2762
EXPORT_SYMBOL(cpufreq_update_policy);
2763
2764
/**
2765
* cpufreq_update_limits - Update policy limits for a given CPU.
2766
* @cpu: CPU to update the policy limits for.
2767
*
2768
* Invoke the driver's ->update_limits callback if present or call
2769
* cpufreq_policy_refresh() for @cpu.
2770
*/
2771
void cpufreq_update_limits(unsigned int cpu)
2772
{
2773
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
2774
if (!policy)
2775
return;
2776
2777
if (cpufreq_driver->update_limits)
2778
cpufreq_driver->update_limits(policy);
2779
else
2780
cpufreq_policy_refresh(policy);
2781
}
2782
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2783
2784
/*********************************************************************
2785
* BOOST *
2786
*********************************************************************/
2787
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2788
{
2789
int ret;
2790
2791
if (!policy->freq_table)
2792
return -ENXIO;
2793
2794
ret = cpufreq_frequency_table_cpuinfo(policy);
2795
if (ret) {
2796
pr_err("%s: Policy frequency update failed\n", __func__);
2797
return ret;
2798
}
2799
2800
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2801
if (ret < 0)
2802
return ret;
2803
2804
return 0;
2805
}
2806
EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
2807
2808
static int cpufreq_boost_trigger_state(int state)
2809
{
2810
struct cpufreq_policy *policy;
2811
unsigned long flags;
2812
int ret = -EOPNOTSUPP;
2813
2814
/*
2815
* Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
2816
* make sure all policies are in sync with global boost flag.
2817
*/
2818
2819
write_lock_irqsave(&cpufreq_driver_lock, flags);
2820
cpufreq_driver->boost_enabled = state;
2821
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2822
2823
cpus_read_lock();
2824
for_each_active_policy(policy) {
2825
if (!policy->boost_supported)
2826
continue;
2827
2828
ret = policy_set_boost(policy, state);
2829
if (unlikely(ret))
2830
break;
2831
}
2832
2833
cpus_read_unlock();
2834
2835
if (likely(!ret))
2836
return 0;
2837
2838
write_lock_irqsave(&cpufreq_driver_lock, flags);
2839
cpufreq_driver->boost_enabled = !state;
2840
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2841
2842
pr_err("%s: Cannot %s BOOST\n",
2843
__func__, str_enable_disable(state));
2844
2845
return ret;
2846
}
2847
2848
static bool cpufreq_boost_supported(void)
2849
{
2850
return cpufreq_driver->set_boost;
2851
}
2852
2853
static int create_boost_sysfs_file(void)
2854
{
2855
int ret;
2856
2857
ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2858
if (ret)
2859
pr_err("%s: cannot register global BOOST sysfs file\n",
2860
__func__);
2861
2862
return ret;
2863
}
2864
2865
static void remove_boost_sysfs_file(void)
2866
{
2867
if (cpufreq_boost_supported())
2868
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2869
}
2870
2871
bool cpufreq_boost_enabled(void)
2872
{
2873
return cpufreq_driver->boost_enabled;
2874
}
2875
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2876
2877
/*********************************************************************
2878
* REGISTER / UNREGISTER CPUFREQ DRIVER *
2879
*********************************************************************/
2880
static enum cpuhp_state hp_online;
2881
2882
static int cpuhp_cpufreq_online(unsigned int cpu)
2883
{
2884
cpufreq_online(cpu);
2885
2886
return 0;
2887
}
2888
2889
static int cpuhp_cpufreq_offline(unsigned int cpu)
2890
{
2891
cpufreq_offline(cpu);
2892
2893
return 0;
2894
}
2895
2896
/**
2897
* cpufreq_register_driver - register a CPU Frequency driver
2898
* @driver_data: A struct cpufreq_driver containing the values
2899
* submitted by the CPU Frequency driver.
2900
*
2901
* Registers a CPU Frequency driver with this core code. This code
2902
* returns zero on success, -EEXIST when another driver got here first
2903
* (and isn't unregistered in the meantime).
2904
*
2905
*/
2906
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2907
{
2908
unsigned long flags;
2909
int ret;
2910
2911
if (cpufreq_disabled())
2912
return -ENODEV;
2913
2914
/*
2915
* The cpufreq core depends heavily on the availability of device
2916
* structures, so make sure they are available before proceeding further.
2917
*/
2918
if (!get_cpu_device(0))
2919
return -EPROBE_DEFER;
2920
2921
if (!driver_data || !driver_data->verify || !driver_data->init ||
2922
(driver_data->target_index && driver_data->target) ||
2923
(!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
2924
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2925
(!driver_data->online != !driver_data->offline) ||
2926
(driver_data->adjust_perf && !driver_data->fast_switch))
2927
return -EINVAL;
2928
2929
pr_debug("trying to register driver %s\n", driver_data->name);
2930
2931
/* Protect against concurrent CPU online/offline. */
2932
cpus_read_lock();
2933
2934
write_lock_irqsave(&cpufreq_driver_lock, flags);
2935
if (cpufreq_driver) {
2936
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2937
ret = -EEXIST;
2938
goto out;
2939
}
2940
cpufreq_driver = driver_data;
2941
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2942
2943
if (driver_data->setpolicy)
2944
driver_data->flags |= CPUFREQ_CONST_LOOPS;
2945
2946
if (cpufreq_boost_supported()) {
2947
ret = create_boost_sysfs_file();
2948
if (ret)
2949
goto err_null_driver;
2950
}
2951
2952
/*
2953
* Mark support for the scheduler's frequency invariance engine for
2954
* drivers that implement target(), target_index() or fast_switch().
2955
*/
2956
if (!cpufreq_driver->setpolicy) {
2957
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2958
pr_debug("cpufreq: supports frequency invariance\n");
2959
}
2960
2961
ret = subsys_interface_register(&cpufreq_interface);
2962
if (ret)
2963
goto err_boost_unreg;
2964
2965
if (unlikely(list_empty(&cpufreq_policy_list))) {
2966
/* if all ->init() calls failed, unregister */
2967
ret = -ENODEV;
2968
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2969
driver_data->name);
2970
goto err_if_unreg;
2971
}
2972
2973
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2974
"cpufreq:online",
2975
cpuhp_cpufreq_online,
2976
cpuhp_cpufreq_offline);
2977
if (ret < 0)
2978
goto err_if_unreg;
2979
hp_online = ret;
2980
ret = 0;
2981
2982
pr_debug("driver %s up and running\n", driver_data->name);
2983
goto out;
2984
2985
err_if_unreg:
2986
subsys_interface_unregister(&cpufreq_interface);
2987
err_boost_unreg:
2988
if (!cpufreq_driver->setpolicy)
2989
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2990
remove_boost_sysfs_file();
2991
err_null_driver:
2992
write_lock_irqsave(&cpufreq_driver_lock, flags);
2993
cpufreq_driver = NULL;
2994
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2995
out:
2996
cpus_read_unlock();
2997
return ret;
2998
}
2999
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
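/*
 * Illustrative sketch (editorial addition): the minimal registration flow a
 * platform driver built on this interface follows. All demo_* names are
 * hypothetical; demo_cpufreq_init() and demo_cpufreq_target_index() would be
 * defined by the driver, and the generic helpers assume a frequency table.
 */
#if 0
static struct cpufreq_driver demo_cpufreq_driver = {
	.name		= "demo-cpufreq",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= demo_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= demo_cpufreq_target_index,
	.get		= cpufreq_generic_get,
};

static int __init demo_cpufreq_probe(void)
{
	/* Fails with -EEXIST if another cpufreq driver is already registered. */
	return cpufreq_register_driver(&demo_cpufreq_driver);
}
#endif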
3000
3001
/*
3002
* cpufreq_unregister_driver - unregister the current CPUFreq driver
3003
*
3004
* Unregister the current CPUFreq driver. Only call this if you have
3005
* the right to do so, i.e. if you have succeeded in initialising before!
3006
* The call is a no-op (apart from a warning) if @driver does not match the
3007
* currently registered cpufreq driver.
3008
*/
3009
void cpufreq_unregister_driver(struct cpufreq_driver *driver)
3010
{
3011
unsigned long flags;
3012
3013
if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
3014
return;
3015
3016
pr_debug("unregistering driver %s\n", driver->name);
3017
3018
/* Protect against concurrent cpu hotplug */
3019
cpus_read_lock();
3020
subsys_interface_unregister(&cpufreq_interface);
3021
remove_boost_sysfs_file();
3022
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
3023
cpuhp_remove_state_nocalls_cpuslocked(hp_online);
3024
3025
write_lock_irqsave(&cpufreq_driver_lock, flags);
3026
3027
cpufreq_driver = NULL;
3028
3029
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
3030
cpus_read_unlock();
3031
}
3032
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
3033
3034
static int __init cpufreq_core_init(void)
3035
{
3036
struct cpufreq_governor *gov = cpufreq_default_governor();
3037
struct device *dev_root;
3038
3039
if (cpufreq_disabled())
3040
return -ENODEV;
3041
3042
dev_root = bus_get_dev_root(&cpu_subsys);
3043
if (dev_root) {
3044
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
3045
put_device(dev_root);
3046
}
3047
BUG_ON(!cpufreq_global_kobject);
3048
3049
if (!strlen(default_governor))
3050
strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
3051
3052
return 0;
3053
}
3054
3055
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
3056
{
3057
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
3058
if (!policy) {
3059
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
3060
return false;
3061
}
3062
3063
return sugov_is_governor(policy);
3064
}
3065
3066
bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
3067
{
3068
unsigned int cpu;
3069
3070
/* Do not attempt EAS if schedutil is not being used. */
3071
for_each_cpu(cpu, cpu_mask) {
3072
if (!cpufreq_policy_is_good_for_eas(cpu)) {
3073
pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
3074
cpumask_pr_args(cpu_mask));
3075
return false;
3076
}
3077
}
3078
3079
return true;
3080
}
3081
3082
module_param(off, int, 0444);
3083
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
3084
core_initcall(cpufreq_core_init);
3085
3086