GitHub Repository: torvalds/linux
Path: blob/master/drivers/cpufreq/cpufreq.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* linux/drivers/cpufreq/cpufreq.c
4
*
5
* Copyright (C) 2001 Russell King
6
* (C) 2002 - 2003 Dominik Brodowski <[email protected]>
7
* (C) 2013 Viresh Kumar <[email protected]>
8
*
9
* Oct 2005 - Ashok Raj <[email protected]>
10
* Added handling for CPU hotplug
11
* Feb 2006 - Jacob Shin <[email protected]>
12
* Fix handling for CPU hotplug -- affected CPUs
13
*/
14
15
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17
#include <linux/cpu.h>
18
#include <linux/cpufreq.h>
19
#include <linux/cpu_cooling.h>
20
#include <linux/delay.h>
21
#include <linux/device.h>
22
#include <linux/init.h>
23
#include <linux/kernel_stat.h>
24
#include <linux/module.h>
25
#include <linux/mutex.h>
26
#include <linux/pm_qos.h>
27
#include <linux/slab.h>
28
#include <linux/string_choices.h>
29
#include <linux/suspend.h>
30
#include <linux/syscore_ops.h>
31
#include <linux/tick.h>
32
#include <linux/units.h>
33
#include <trace/events/power.h>
34
35
static LIST_HEAD(cpufreq_policy_list);
36
37
/* Macros to iterate over CPU policies */
38
#define for_each_suitable_policy(__policy, __active) \
39
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
40
if ((__active) == !policy_is_inactive(__policy))
41
42
#define for_each_active_policy(__policy) \
43
for_each_suitable_policy(__policy, true)
44
#define for_each_inactive_policy(__policy) \
45
for_each_suitable_policy(__policy, false)
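/*
 * Usage sketch for the iterators above (illustrative only): the cursor is a
 * struct cpufreq_policy pointer, e.g.
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("active policy, owning CPU%u\n", policy->cpu);
 */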
46
47
/* Iterate over governors */
48
static LIST_HEAD(cpufreq_governor_list);
49
#define for_each_governor(__governor) \
50
list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
51
52
static char default_governor[CPUFREQ_NAME_LEN];
53
54
/*
55
* The "cpufreq driver" - the arch- or hardware-dependent low
56
* level driver of CPUFreq support, and its spinlock. This lock
57
* also protects the cpufreq_cpu_data array.
58
*/
59
static struct cpufreq_driver *cpufreq_driver;
60
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
61
static DEFINE_RWLOCK(cpufreq_driver_lock);
62
63
static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
64
bool cpufreq_supports_freq_invariance(void)
65
{
66
return static_branch_likely(&cpufreq_freq_invariance);
67
}
68
69
/* Flag to suspend/resume CPUFreq governors */
70
static bool cpufreq_suspended;
71
72
static inline bool has_target(void)
73
{
74
return cpufreq_driver->target_index || cpufreq_driver->target;
75
}
76
77
bool has_target_index(void)
78
{
79
return !!cpufreq_driver->target_index;
80
}
81
82
/* internal prototypes */
83
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
84
static int cpufreq_init_governor(struct cpufreq_policy *policy);
85
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
86
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
87
static int cpufreq_set_policy(struct cpufreq_policy *policy,
88
struct cpufreq_governor *new_gov,
89
unsigned int new_pol);
90
static bool cpufreq_boost_supported(void);
91
static int cpufreq_boost_trigger_state(int state);
92
93
/*
94
* Two notifier lists: the "policy" list is involved in the
95
* validation process for a new CPU frequency policy; the
96
* "transition" list for kernel code that needs to handle
97
* changes to devices when the CPU clock speed changes.
98
* The mutex locks both lists.
99
*/
100
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
101
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
102
103
static int off __read_mostly;
104
static int cpufreq_disabled(void)
105
{
106
return off;
107
}
108
void disable_cpufreq(void)
109
{
110
off = 1;
111
}
112
EXPORT_SYMBOL_GPL(disable_cpufreq);
113
114
static DEFINE_MUTEX(cpufreq_governor_mutex);
115
116
bool have_governor_per_policy(void)
117
{
118
return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
119
}
120
EXPORT_SYMBOL_GPL(have_governor_per_policy);
121
122
static struct kobject *cpufreq_global_kobject;
123
124
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
125
{
126
if (have_governor_per_policy())
127
return &policy->kobj;
128
else
129
return cpufreq_global_kobject;
130
}
131
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
132
133
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
134
{
135
struct kernel_cpustat kcpustat;
136
u64 cur_wall_time;
137
u64 idle_time;
138
u64 busy_time;
139
140
cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
141
142
kcpustat_cpu_fetch(&kcpustat, cpu);
143
144
busy_time = kcpustat.cpustat[CPUTIME_USER];
145
busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
146
busy_time += kcpustat.cpustat[CPUTIME_IRQ];
147
busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
148
busy_time += kcpustat.cpustat[CPUTIME_STEAL];
149
busy_time += kcpustat.cpustat[CPUTIME_NICE];
150
151
idle_time = cur_wall_time - busy_time;
152
if (wall)
153
*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
154
155
return div_u64(idle_time, NSEC_PER_USEC);
156
}
157
158
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
159
{
160
u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
161
162
if (idle_time == -1ULL)
163
return get_cpu_idle_time_jiffy(cpu, wall);
164
else if (!io_busy)
165
idle_time += get_cpu_iowait_time_us(cpu, wall);
166
167
return idle_time;
168
}
169
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
170
171
/*
172
* This is a generic cpufreq init() routine which can be used by cpufreq
173
* drivers of SMP systems. It will do the following:
* - validate and show the frequency table passed in
* - set the policy's transition latency
* - populate policy->cpus with all possible CPUs
177
*/
178
void cpufreq_generic_init(struct cpufreq_policy *policy,
179
struct cpufreq_frequency_table *table,
180
unsigned int transition_latency)
181
{
182
policy->freq_table = table;
183
policy->cpuinfo.transition_latency = transition_latency;
184
185
/*
186
* The driver only supports the SMP configuration where all processors
187
* share the clock and voltage.
188
*/
189
cpumask_setall(policy->cpus);
190
}
191
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
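/*
 * Sketch of a driver ->init() callback using the helper above (illustrative;
 * my_freq_table and the 100000 ns latency are made-up placeholders):
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, my_freq_table, 100000);
 *		return 0;
 *	}
 */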
192
193
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
194
{
195
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
196
197
return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
198
}
199
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
200
201
unsigned int cpufreq_generic_get(unsigned int cpu)
202
{
203
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
204
205
if (!policy || IS_ERR(policy->clk)) {
206
pr_err("%s: No %s associated to cpu: %d\n",
207
__func__, policy ? "clk" : "policy", cpu);
208
return 0;
209
}
210
211
return clk_get_rate(policy->clk) / 1000;
212
}
213
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
214
215
/**
216
* cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
217
* @cpu: CPU to find the policy for.
218
*
219
* Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
220
* the kobject reference counter of that policy. Return a valid policy on
221
* success or NULL on failure.
222
*
223
* The policy returned by this function has to be released with the help of
224
* cpufreq_cpu_put() to balance its kobject reference counter properly.
225
*/
226
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
227
{
228
struct cpufreq_policy *policy = NULL;
229
unsigned long flags;
230
231
if (WARN_ON(cpu >= nr_cpu_ids))
232
return NULL;
233
234
/* get the cpufreq driver */
235
read_lock_irqsave(&cpufreq_driver_lock, flags);
236
237
if (cpufreq_driver) {
238
/* get the CPU */
239
policy = cpufreq_cpu_get_raw(cpu);
240
if (policy)
241
kobject_get(&policy->kobj);
242
}
243
244
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
245
246
return policy;
247
}
248
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
249
250
/**
251
* cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
252
* @policy: cpufreq policy returned by cpufreq_cpu_get().
253
*/
254
void cpufreq_cpu_put(struct cpufreq_policy *policy)
255
{
256
kobject_put(&policy->kobj);
257
}
258
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
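/*
 * Typical get/put pairing for the two helpers above (illustrative):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		unsigned int cur = policy->cur;
 *
 *		cpufreq_cpu_put(policy);
 *	}
 *
 * Further down in this file the same pairing is written with the
 * __free(put_cpufreq_policy) cleanup attribute instead.
 */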
259
260
/*********************************************************************
261
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
262
*********************************************************************/
263
264
/**
265
* adjust_jiffies - Adjust the system "loops_per_jiffy".
266
* @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
267
* @ci: Frequency change information.
268
*
269
* This function alters the system "loops_per_jiffy" for the clock
270
* speed change. Note that loops_per_jiffy cannot be updated on SMP
271
* systems as each CPU might be scaled differently. So, use the arch
272
* per-CPU loops_per_jiffy value wherever possible.
273
*/
274
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
275
{
276
#ifndef CONFIG_SMP
277
static unsigned long l_p_j_ref;
278
static unsigned int l_p_j_ref_freq;
279
280
if (ci->flags & CPUFREQ_CONST_LOOPS)
281
return;
282
283
if (!l_p_j_ref_freq) {
284
l_p_j_ref = loops_per_jiffy;
285
l_p_j_ref_freq = ci->old;
286
pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
287
l_p_j_ref, l_p_j_ref_freq);
288
}
289
if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
290
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
291
ci->new);
292
pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
293
loops_per_jiffy, ci->new);
294
}
295
#endif
296
}
297
298
/**
299
* cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
300
* @policy: cpufreq policy associated with the frequency transition.
* @freqs: contains details of the frequency update.
302
* @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
303
*
304
* This function calls the transition notifiers and adjust_jiffies().
305
*
306
* It is called twice on all CPU frequency changes that have external effects.
307
*/
308
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
309
struct cpufreq_freqs *freqs,
310
unsigned int state)
311
{
312
int cpu;
313
314
BUG_ON(irqs_disabled());
315
316
if (cpufreq_disabled())
317
return;
318
319
freqs->policy = policy;
320
freqs->flags = cpufreq_driver->flags;
321
pr_debug("notification %u of frequency transition to %u kHz\n",
322
state, freqs->new);
323
324
switch (state) {
325
case CPUFREQ_PRECHANGE:
326
/*
327
* Detect if the driver reported a value as "old frequency"
328
* which is not equal to what the cpufreq core thinks is
329
* "old frequency".
330
*/
331
if (policy->cur && policy->cur != freqs->old) {
332
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
333
freqs->old, policy->cur);
334
freqs->old = policy->cur;
335
}
336
337
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
338
CPUFREQ_PRECHANGE, freqs);
339
340
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
341
break;
342
343
case CPUFREQ_POSTCHANGE:
344
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
345
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
346
cpumask_pr_args(policy->cpus));
347
348
for_each_cpu(cpu, policy->cpus)
349
trace_cpu_frequency(freqs->new, cpu);
350
351
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
352
CPUFREQ_POSTCHANGE, freqs);
353
354
cpufreq_stats_record_transition(policy, freqs->new);
355
policy->cur = freqs->new;
356
}
357
}
358
359
/* Do post notifications when there are chances that transition has failed */
360
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
361
struct cpufreq_freqs *freqs, int transition_failed)
362
{
363
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
364
if (!transition_failed)
365
return;
366
367
swap(freqs->old, freqs->new);
368
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
369
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
370
}
371
372
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
373
struct cpufreq_freqs *freqs)
374
{
375
376
/*
377
* Catch double invocations of _begin() which lead to self-deadlock.
378
* ASYNC_NOTIFICATION drivers are left out because the cpufreq core
379
* doesn't invoke _begin() on their behalf, and hence the chances of
380
* double invocations are very low. Moreover, there are scenarios
381
* where these checks can emit false-positive warnings in these
382
* drivers; so we avoid that by skipping them altogether.
383
*/
384
WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
385
&& current == policy->transition_task);
386
387
wait:
388
wait_event(policy->transition_wait, !policy->transition_ongoing);
389
390
spin_lock(&policy->transition_lock);
391
392
if (unlikely(policy->transition_ongoing)) {
393
spin_unlock(&policy->transition_lock);
394
goto wait;
395
}
396
397
policy->transition_ongoing = true;
398
policy->transition_task = current;
399
400
spin_unlock(&policy->transition_lock);
401
402
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
403
}
404
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
405
406
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
407
struct cpufreq_freqs *freqs, int transition_failed)
408
{
409
if (WARN_ON(!policy->transition_ongoing))
410
return;
411
412
cpufreq_notify_post_transition(policy, freqs, transition_failed);
413
414
arch_set_freq_scale(policy->related_cpus,
415
policy->cur,
416
arch_scale_freq_ref(policy->cpu));
417
418
spin_lock(&policy->transition_lock);
419
policy->transition_ongoing = false;
420
policy->transition_task = NULL;
421
spin_unlock(&policy->transition_lock);
422
423
wake_up(&policy->transition_wait);
424
}
425
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
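/*
 * Driver-side pairing sketch for the two helpers above (illustrative;
 * change_hw_freq() stands in for a driver-specific frequency write):
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = change_hw_freq(target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */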
426
427
/*
428
* Fast frequency switching status count. Positive means "enabled", negative
429
* means "disabled" and 0 means "not decided yet".
430
*/
431
static int cpufreq_fast_switch_count;
432
static DEFINE_MUTEX(cpufreq_fast_switch_lock);
433
434
static void cpufreq_list_transition_notifiers(void)
435
{
436
struct notifier_block *nb;
437
438
pr_info("Registered transition notifiers:\n");
439
440
mutex_lock(&cpufreq_transition_notifier_list.mutex);
441
442
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
443
pr_info("%pS\n", nb->notifier_call);
444
445
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
446
}
447
448
/**
449
* cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
450
* @policy: cpufreq policy to enable fast frequency switching for.
451
*
452
* Try to enable fast frequency switching for @policy.
453
*
454
* The attempt will fail if there is at least one transition notifier registered
455
* at this point, as fast frequency switching is quite fundamentally at odds
456
* with transition notifiers. Thus if successful, it will make registration of
457
* transition notifiers fail going forward.
458
*/
459
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
460
{
461
lockdep_assert_held(&policy->rwsem);
462
463
if (!policy->fast_switch_possible)
464
return;
465
466
mutex_lock(&cpufreq_fast_switch_lock);
467
if (cpufreq_fast_switch_count >= 0) {
468
cpufreq_fast_switch_count++;
469
policy->fast_switch_enabled = true;
470
} else {
471
pr_warn("CPU%u: Fast frequency switching not enabled\n",
472
policy->cpu);
473
cpufreq_list_transition_notifiers();
474
}
475
mutex_unlock(&cpufreq_fast_switch_lock);
476
}
477
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
478
479
/**
480
* cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
481
* @policy: cpufreq policy to disable fast frequency switching for.
482
*/
483
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
484
{
485
mutex_lock(&cpufreq_fast_switch_lock);
486
if (policy->fast_switch_enabled) {
487
policy->fast_switch_enabled = false;
488
if (!WARN_ON(cpufreq_fast_switch_count <= 0))
489
cpufreq_fast_switch_count--;
490
}
491
mutex_unlock(&cpufreq_fast_switch_lock);
492
}
493
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
494
495
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
496
unsigned int target_freq,
497
unsigned int min, unsigned int max,
498
unsigned int relation)
499
{
500
unsigned int idx;
501
502
target_freq = clamp_val(target_freq, min, max);
503
504
if (!policy->freq_table)
505
return target_freq;
506
507
idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
508
policy->cached_resolved_idx = idx;
509
policy->cached_target_freq = target_freq;
510
return policy->freq_table[idx].frequency;
511
}
512
513
/**
514
* cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
515
* one.
516
* @policy: associated policy to interrogate
517
* @target_freq: target frequency to resolve.
518
*
519
* The target to driver frequency mapping is cached in the policy.
520
*
521
* Return: Lowest driver-supported frequency greater than or equal to the
522
* given target_freq, subject to policy (min/max) and driver limitations.
523
*/
524
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
525
unsigned int target_freq)
526
{
527
unsigned int min = READ_ONCE(policy->min);
528
unsigned int max = READ_ONCE(policy->max);
529
530
/*
531
* If this function runs in parallel with cpufreq_set_policy(), it may
532
* read policy->min before the update and policy->max after the update
533
* or the other way around, so there is no ordering guarantee.
534
*
535
* Resolve this by always honoring the max (in case it comes from
536
* thermal throttling or similar).
537
*/
538
if (unlikely(min > max))
539
min = max;
540
541
return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
542
}
543
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
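/*
 * Worked example for the mapping above (illustrative): with a frequency
 * table of {500000, 1000000, 1500000} kHz, policy->min = 500000 and
 * policy->max = 1000000, a target of 1200000 kHz is first clamped to
 * 1000000 and then resolved to the 1000000 kHz table entry.
 */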
544
545
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
546
{
547
unsigned int latency;
548
549
if (policy->transition_delay_us)
550
return policy->transition_delay_us;
551
552
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
553
if (latency)
554
/* Give a 50% breathing room between updates */
555
return latency + (latency >> 1);
556
557
return USEC_PER_MSEC;
558
}
559
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
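/*
 * Worked example for the 50% headroom above (illustrative): a driver
 * reporting cpuinfo.transition_latency = 20000 ns gives latency = 20 us,
 * so this returns 20 + (20 >> 1) = 30 us between frequency updates.
 */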
560
561
/*********************************************************************
562
* SYSFS INTERFACE *
563
*********************************************************************/
564
static ssize_t show_boost(struct kobject *kobj,
565
struct kobj_attribute *attr, char *buf)
566
{
567
return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
568
}
569
570
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
571
const char *buf, size_t count)
572
{
573
bool enable;
574
575
if (kstrtobool(buf, &enable))
576
return -EINVAL;
577
578
if (cpufreq_boost_trigger_state(enable)) {
579
pr_err("%s: Cannot %s BOOST!\n",
580
__func__, str_enable_disable(enable));
581
return -EINVAL;
582
}
583
584
pr_debug("%s: cpufreq BOOST %s\n",
585
__func__, str_enabled_disabled(enable));
586
587
return count;
588
}
589
define_one_global_rw(boost);
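/*
 * The define_one_global_rw() above backs the global boost knob, which is
 * typically exposed as /sys/devices/system/cpu/cpufreq/boost and wired to
 * show_boost()/store_boost().
 */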
590
591
static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
592
{
593
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
594
}
595
596
static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
597
{
598
int ret;
599
600
if (policy->boost_enabled == enable)
601
return 0;
602
603
policy->boost_enabled = enable;
604
605
ret = cpufreq_driver->set_boost(policy, enable);
606
if (ret)
607
policy->boost_enabled = !policy->boost_enabled;
608
609
return ret;
610
}
611
612
static ssize_t store_local_boost(struct cpufreq_policy *policy,
613
const char *buf, size_t count)
614
{
615
int ret;
616
bool enable;
617
618
if (kstrtobool(buf, &enable))
619
return -EINVAL;
620
621
if (!cpufreq_driver->boost_enabled)
622
return -EINVAL;
623
624
if (!policy->boost_supported)
625
return -EINVAL;
626
627
ret = policy_set_boost(policy, enable);
628
if (!ret)
629
return count;
630
631
return ret;
632
}
633
634
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
635
636
static struct cpufreq_governor *find_governor(const char *str_governor)
637
{
638
struct cpufreq_governor *t;
639
640
for_each_governor(t)
641
if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
642
return t;
643
644
return NULL;
645
}
646
647
static struct cpufreq_governor *get_governor(const char *str_governor)
648
{
649
struct cpufreq_governor *t;
650
651
mutex_lock(&cpufreq_governor_mutex);
652
t = find_governor(str_governor);
653
if (!t)
654
goto unlock;
655
656
if (!try_module_get(t->owner))
657
t = NULL;
658
659
unlock:
660
mutex_unlock(&cpufreq_governor_mutex);
661
662
return t;
663
}
664
665
static unsigned int cpufreq_parse_policy(char *str_governor)
666
{
667
if (!strncasecmp(str_governor, "performance", strlen("performance")))
668
return CPUFREQ_POLICY_PERFORMANCE;
669
670
if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
671
return CPUFREQ_POLICY_POWERSAVE;
672
673
return CPUFREQ_POLICY_UNKNOWN;
674
}
675
676
/**
677
* cpufreq_parse_governor - parse a governor string only for has_target()
678
* @str_governor: Governor name.
679
*/
680
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
681
{
682
struct cpufreq_governor *t;
683
684
t = get_governor(str_governor);
685
if (t)
686
return t;
687
688
if (request_module("cpufreq_%s", str_governor))
689
return NULL;
690
691
return get_governor(str_governor);
692
}
693
694
/*
695
* cpufreq_per_cpu_attr_read() / show_##file_name() -
696
* print out cpufreq information
697
*
698
* Write out information from cpufreq_driver->policy[cpu]; object must be
699
* "unsigned int".
700
*/
701
702
#define show_one(file_name, object) \
703
static ssize_t show_##file_name \
704
(struct cpufreq_policy *policy, char *buf) \
705
{ \
706
return sysfs_emit(buf, "%u\n", policy->object); \
707
}
708
709
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
710
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
711
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
712
show_one(scaling_min_freq, min);
713
show_one(scaling_max_freq, max);
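/*
 * For example, show_one(scaling_min_freq, min) above expands to a
 * show_scaling_min_freq() helper that prints policy->min via sysfs_emit().
 */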
714
715
__weak int arch_freq_get_on_cpu(int cpu)
716
{
717
return -EOPNOTSUPP;
718
}
719
720
static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
721
{
722
return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
723
}
724
725
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
726
{
727
ssize_t ret;
728
int freq;
729
730
freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
731
? arch_freq_get_on_cpu(policy->cpu)
732
: 0;
733
734
if (freq > 0)
735
ret = sysfs_emit(buf, "%u\n", freq);
736
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
737
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
738
else
739
ret = sysfs_emit(buf, "%u\n", policy->cur);
740
return ret;
741
}
742
743
/*
744
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
745
*/
746
#define store_one(file_name, object) \
747
static ssize_t store_##file_name \
748
(struct cpufreq_policy *policy, const char *buf, size_t count) \
749
{ \
750
unsigned long val; \
751
int ret; \
752
\
753
ret = kstrtoul(buf, 0, &val); \
754
if (ret) \
755
return ret; \
756
\
757
ret = freq_qos_update_request(policy->object##_freq_req, val);\
758
return ret >= 0 ? count : ret; \
759
}
760
761
store_one(scaling_min_freq, min);
762
store_one(scaling_max_freq, max);
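/*
 * Likewise, store_one(scaling_max_freq, max) above expands to
 * store_scaling_max_freq(), which parses the value and passes it to
 * freq_qos_update_request(policy->max_freq_req, val).
 */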
763
764
/*
765
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
766
*/
767
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
768
char *buf)
769
{
770
unsigned int cur_freq = __cpufreq_get(policy);
771
772
if (cur_freq)
773
return sysfs_emit(buf, "%u\n", cur_freq);
774
775
return sysfs_emit(buf, "<unknown>\n");
776
}
777
778
/*
779
* show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
780
*/
781
static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
782
char *buf)
783
{
784
int avg_freq = arch_freq_get_on_cpu(policy->cpu);
785
786
if (avg_freq > 0)
787
return sysfs_emit(buf, "%u\n", avg_freq);
788
return avg_freq != 0 ? avg_freq : -EINVAL;
789
}
790
791
/*
792
* show_scaling_governor - show the current policy for the specified CPU
793
*/
794
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
795
{
796
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
797
return sysfs_emit(buf, "powersave\n");
798
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
799
return sysfs_emit(buf, "performance\n");
800
else if (policy->governor)
801
return sysfs_emit(buf, "%s\n", policy->governor->name);
802
return -EINVAL;
803
}
804
805
/*
806
* store_scaling_governor - store policy for the specified CPU
807
*/
808
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
809
const char *buf, size_t count)
810
{
811
char str_governor[CPUFREQ_NAME_LEN];
812
int ret;
813
814
ret = sscanf(buf, "%15s", str_governor);
815
if (ret != 1)
816
return -EINVAL;
817
818
if (cpufreq_driver->setpolicy) {
819
unsigned int new_pol;
820
821
new_pol = cpufreq_parse_policy(str_governor);
822
if (!new_pol)
823
return -EINVAL;
824
825
ret = cpufreq_set_policy(policy, NULL, new_pol);
826
} else {
827
struct cpufreq_governor *new_gov;
828
829
new_gov = cpufreq_parse_governor(str_governor);
830
if (!new_gov)
831
return -EINVAL;
832
833
ret = cpufreq_set_policy(policy, new_gov,
834
CPUFREQ_POLICY_UNKNOWN);
835
836
module_put(new_gov->owner);
837
}
838
839
return ret ? ret : count;
840
}
841
842
/*
843
* show_scaling_driver - show the cpufreq driver currently loaded
844
*/
845
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
846
{
847
return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
848
}
849
850
/*
851
* show_scaling_available_governors - show the available CPUfreq governors
852
*/
853
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
854
char *buf)
855
{
856
ssize_t i = 0;
857
struct cpufreq_governor *t;
858
859
if (!has_target()) {
860
i += sysfs_emit(buf, "performance powersave");
861
goto out;
862
}
863
864
mutex_lock(&cpufreq_governor_mutex);
865
for_each_governor(t) {
866
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
867
- (CPUFREQ_NAME_LEN + 2)))
868
break;
869
i += sysfs_emit_at(buf, i, "%s ", t->name);
870
}
871
mutex_unlock(&cpufreq_governor_mutex);
872
out:
873
i += sysfs_emit_at(buf, i, "\n");
874
return i;
875
}
876
877
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
878
{
879
ssize_t i = 0;
880
unsigned int cpu;
881
882
for_each_cpu(cpu, mask) {
883
i += sysfs_emit_at(buf, i, "%u ", cpu);
884
if (i >= (PAGE_SIZE - 5))
885
break;
886
}
887
888
/* Remove the extra space at the end */
889
i--;
890
891
i += sysfs_emit_at(buf, i, "\n");
892
return i;
893
}
894
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
895
896
/*
897
* show_related_cpus - show the CPUs affected by each transition even if
898
* hw coordination is in use
899
*/
900
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
901
{
902
return cpufreq_show_cpus(policy->related_cpus, buf);
903
}
904
905
/*
906
* show_affected_cpus - show the CPUs affected by each transition
907
*/
908
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
909
{
910
return cpufreq_show_cpus(policy->cpus, buf);
911
}
912
913
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
914
const char *buf, size_t count)
915
{
916
unsigned int freq = 0;
917
int ret;
918
919
if (!policy->governor || !policy->governor->store_setspeed)
920
return -EINVAL;
921
922
ret = kstrtouint(buf, 0, &freq);
923
if (ret)
924
return ret;
925
926
policy->governor->store_setspeed(policy, freq);
927
928
return count;
929
}
930
931
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
932
{
933
if (!policy->governor || !policy->governor->show_setspeed)
934
return sysfs_emit(buf, "<unsupported>\n");
935
936
return policy->governor->show_setspeed(policy, buf);
937
}
938
939
/*
940
* show_bios_limit - show the current cpufreq HW/BIOS limitation
941
*/
942
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
943
{
944
unsigned int limit;
945
int ret;
946
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
947
if (!ret)
948
return sysfs_emit(buf, "%u\n", limit);
949
return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
950
}
951
952
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
953
cpufreq_freq_attr_ro(cpuinfo_avg_freq);
954
cpufreq_freq_attr_ro(cpuinfo_min_freq);
955
cpufreq_freq_attr_ro(cpuinfo_max_freq);
956
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
957
cpufreq_freq_attr_ro(scaling_available_governors);
958
cpufreq_freq_attr_ro(scaling_driver);
959
cpufreq_freq_attr_ro(scaling_cur_freq);
960
cpufreq_freq_attr_ro(bios_limit);
961
cpufreq_freq_attr_ro(related_cpus);
962
cpufreq_freq_attr_ro(affected_cpus);
963
cpufreq_freq_attr_rw(scaling_min_freq);
964
cpufreq_freq_attr_rw(scaling_max_freq);
965
cpufreq_freq_attr_rw(scaling_governor);
966
cpufreq_freq_attr_rw(scaling_setspeed);
967
968
static struct attribute *cpufreq_attrs[] = {
969
&cpuinfo_min_freq.attr,
970
&cpuinfo_max_freq.attr,
971
&cpuinfo_transition_latency.attr,
972
&scaling_cur_freq.attr,
973
&scaling_min_freq.attr,
974
&scaling_max_freq.attr,
975
&affected_cpus.attr,
976
&related_cpus.attr,
977
&scaling_governor.attr,
978
&scaling_driver.attr,
979
&scaling_available_governors.attr,
980
&scaling_setspeed.attr,
981
NULL
982
};
983
ATTRIBUTE_GROUPS(cpufreq);
984
985
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
986
#define to_attr(a) container_of(a, struct freq_attr, attr)
987
988
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
989
{
990
struct cpufreq_policy *policy = to_policy(kobj);
991
struct freq_attr *fattr = to_attr(attr);
992
993
if (!fattr->show)
994
return -EIO;
995
996
guard(cpufreq_policy_read)(policy);
997
998
if (likely(!policy_is_inactive(policy)))
999
return fattr->show(policy, buf);
1000
1001
return -EBUSY;
1002
}
1003
1004
static ssize_t store(struct kobject *kobj, struct attribute *attr,
1005
const char *buf, size_t count)
1006
{
1007
struct cpufreq_policy *policy = to_policy(kobj);
1008
struct freq_attr *fattr = to_attr(attr);
1009
1010
if (!fattr->store)
1011
return -EIO;
1012
1013
guard(cpufreq_policy_write)(policy);
1014
1015
if (likely(!policy_is_inactive(policy)))
1016
return fattr->store(policy, buf, count);
1017
1018
return -EBUSY;
1019
}
1020
1021
static void cpufreq_sysfs_release(struct kobject *kobj)
1022
{
1023
struct cpufreq_policy *policy = to_policy(kobj);
1024
pr_debug("last reference is dropped\n");
1025
complete(&policy->kobj_unregister);
1026
}
1027
1028
static const struct sysfs_ops sysfs_ops = {
1029
.show = show,
1030
.store = store,
1031
};
1032
1033
static const struct kobj_type ktype_cpufreq = {
1034
.sysfs_ops = &sysfs_ops,
1035
.default_groups = cpufreq_groups,
1036
.release = cpufreq_sysfs_release,
1037
};
1038
1039
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1040
struct device *dev)
1041
{
1042
if (unlikely(!dev))
1043
return;
1044
1045
if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1046
return;
1047
1048
dev_dbg(dev, "%s: Adding symlink\n", __func__);
1049
if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1050
dev_err(dev, "cpufreq symlink creation failed\n");
1051
}
1052
1053
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1054
struct device *dev)
1055
{
1056
dev_dbg(dev, "%s: Removing symlink\n", __func__);
1057
sysfs_remove_link(&dev->kobj, "cpufreq");
1058
cpumask_clear_cpu(cpu, policy->real_cpus);
1059
}
1060
1061
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1062
{
1063
struct freq_attr **drv_attr;
1064
int ret = 0;
1065
1066
/* Attributes that need freq_table */
1067
if (policy->freq_table) {
1068
ret = sysfs_create_file(&policy->kobj,
1069
&cpufreq_freq_attr_scaling_available_freqs.attr);
1070
if (ret)
1071
return ret;
1072
1073
if (cpufreq_boost_supported()) {
1074
ret = sysfs_create_file(&policy->kobj,
1075
&cpufreq_freq_attr_scaling_boost_freqs.attr);
1076
if (ret)
1077
return ret;
1078
}
1079
}
1080
1081
/* set up files for this cpu device */
1082
drv_attr = cpufreq_driver->attr;
1083
while (drv_attr && *drv_attr) {
1084
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1085
if (ret)
1086
return ret;
1087
drv_attr++;
1088
}
1089
if (cpufreq_driver->get) {
1090
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1091
if (ret)
1092
return ret;
1093
}
1094
1095
if (cpufreq_avg_freq_supported(policy)) {
1096
ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
1097
if (ret)
1098
return ret;
1099
}
1100
1101
if (cpufreq_driver->bios_limit) {
1102
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1103
if (ret)
1104
return ret;
1105
}
1106
1107
if (cpufreq_boost_supported()) {
1108
ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
1109
if (ret)
1110
return ret;
1111
}
1112
1113
return 0;
1114
}
1115
1116
static int cpufreq_init_policy(struct cpufreq_policy *policy)
1117
{
1118
struct cpufreq_governor *gov = NULL;
1119
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1120
int ret;
1121
1122
if (has_target()) {
1123
/* Update policy governor to the one used before hotplug. */
1124
if (policy->last_governor[0] != '\0')
1125
gov = get_governor(policy->last_governor);
1126
if (gov) {
1127
pr_debug("Restoring governor %s for cpu %d\n",
1128
gov->name, policy->cpu);
1129
} else {
1130
gov = get_governor(default_governor);
1131
}
1132
1133
if (!gov) {
1134
gov = cpufreq_default_governor();
1135
__module_get(gov->owner);
1136
}
1137
1138
} else {
1139
1140
/* Use the default policy if there is no last_policy. */
1141
if (policy->last_policy) {
1142
pol = policy->last_policy;
1143
} else {
1144
pol = cpufreq_parse_policy(default_governor);
1145
/*
1146
* In case the default governor is neither "performance"
1147
* nor "powersave", fall back to the initial policy
1148
* value set by the driver.
1149
*/
1150
if (pol == CPUFREQ_POLICY_UNKNOWN)
1151
pol = policy->policy;
1152
}
1153
if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1154
pol != CPUFREQ_POLICY_POWERSAVE)
1155
return -ENODATA;
1156
}
1157
1158
ret = cpufreq_set_policy(policy, gov, pol);
1159
if (gov)
1160
module_put(gov->owner);
1161
1162
return ret;
1163
}
1164
1165
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1166
{
1167
int ret = 0;
1168
1169
/* Has this CPU been taken care of already? */
1170
if (cpumask_test_cpu(cpu, policy->cpus))
1171
return 0;
1172
1173
guard(cpufreq_policy_write)(policy);
1174
1175
if (has_target())
1176
cpufreq_stop_governor(policy);
1177
1178
cpumask_set_cpu(cpu, policy->cpus);
1179
1180
if (has_target()) {
1181
ret = cpufreq_start_governor(policy);
1182
if (ret)
1183
pr_err("%s: Failed to start governor\n", __func__);
1184
}
1185
1186
return ret;
1187
}
1188
1189
void refresh_frequency_limits(struct cpufreq_policy *policy)
1190
{
1191
if (!policy_is_inactive(policy)) {
1192
pr_debug("updating policy for CPU %u\n", policy->cpu);
1193
1194
cpufreq_set_policy(policy, policy->governor, policy->policy);
1195
}
1196
}
1197
EXPORT_SYMBOL(refresh_frequency_limits);
1198
1199
static void handle_update(struct work_struct *work)
1200
{
1201
struct cpufreq_policy *policy =
1202
container_of(work, struct cpufreq_policy, update);
1203
1204
pr_debug("handle_update for cpu %u called\n", policy->cpu);
1205
1206
guard(cpufreq_policy_write)(policy);
1207
1208
refresh_frequency_limits(policy);
1209
}
1210
1211
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1212
void *data)
1213
{
1214
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1215
1216
schedule_work(&policy->update);
1217
return 0;
1218
}
1219
1220
static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1221
void *data)
1222
{
1223
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1224
1225
schedule_work(&policy->update);
1226
return 0;
1227
}
1228
1229
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1230
{
1231
struct kobject *kobj;
1232
struct completion *cmp;
1233
1234
scoped_guard(cpufreq_policy_write, policy) {
1235
cpufreq_stats_free_table(policy);
1236
kobj = &policy->kobj;
1237
cmp = &policy->kobj_unregister;
1238
}
1239
kobject_put(kobj);
1240
1241
/*
1242
* We need to make sure that the underlying kobj is
1243
* actually not referenced anymore by anybody before we
1244
* proceed with unloading.
1245
*/
1246
pr_debug("waiting for dropping of refcount\n");
1247
wait_for_completion(cmp);
1248
pr_debug("wait complete\n");
1249
}
1250
1251
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1252
{
1253
struct cpufreq_policy *policy;
1254
struct device *dev = get_cpu_device(cpu);
1255
int ret;
1256
1257
if (!dev)
1258
return NULL;
1259
1260
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1261
if (!policy)
1262
return NULL;
1263
1264
if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1265
goto err_free_policy;
1266
1267
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1268
goto err_free_cpumask;
1269
1270
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1271
goto err_free_rcpumask;
1272
1273
init_completion(&policy->kobj_unregister);
1274
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1275
cpufreq_global_kobject, "policy%u", cpu);
1276
if (ret) {
1277
dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1278
/*
1279
* The entire policy object will be freed below, but the extra
1280
* memory allocated for the kobject name needs to be freed by
1281
* releasing the kobject.
1282
*/
1283
kobject_put(&policy->kobj);
1284
goto err_free_real_cpus;
1285
}
1286
1287
init_rwsem(&policy->rwsem);
1288
1289
freq_constraints_init(&policy->constraints);
1290
1291
policy->nb_min.notifier_call = cpufreq_notifier_min;
1292
policy->nb_max.notifier_call = cpufreq_notifier_max;
1293
1294
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1295
&policy->nb_min);
1296
if (ret) {
1297
dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
1298
ret, cpu);
1299
goto err_kobj_remove;
1300
}
1301
1302
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1303
&policy->nb_max);
1304
if (ret) {
1305
dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
1306
ret, cpu);
1307
goto err_min_qos_notifier;
1308
}
1309
1310
INIT_LIST_HEAD(&policy->policy_list);
1311
spin_lock_init(&policy->transition_lock);
1312
init_waitqueue_head(&policy->transition_wait);
1313
INIT_WORK(&policy->update, handle_update);
1314
1315
return policy;
1316
1317
err_min_qos_notifier:
1318
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1319
&policy->nb_min);
1320
err_kobj_remove:
1321
cpufreq_policy_put_kobj(policy);
1322
err_free_real_cpus:
1323
free_cpumask_var(policy->real_cpus);
1324
err_free_rcpumask:
1325
free_cpumask_var(policy->related_cpus);
1326
err_free_cpumask:
1327
free_cpumask_var(policy->cpus);
1328
err_free_policy:
1329
kfree(policy);
1330
1331
return NULL;
1332
}
1333
1334
static void cpufreq_policy_free(struct cpufreq_policy *policy)
1335
{
1336
unsigned long flags;
1337
int cpu;
1338
1339
/*
1340
* The callers must ensure the policy is inactive by now, to avoid any
1341
* races with show()/store() callbacks.
1342
*/
1343
if (unlikely(!policy_is_inactive(policy)))
1344
pr_warn("%s: Freeing active policy\n", __func__);
1345
1346
/* Remove policy from list */
1347
write_lock_irqsave(&cpufreq_driver_lock, flags);
1348
list_del(&policy->policy_list);
1349
1350
for_each_cpu(cpu, policy->related_cpus)
1351
per_cpu(cpufreq_cpu_data, cpu) = NULL;
1352
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1353
1354
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1355
&policy->nb_max);
1356
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1357
&policy->nb_min);
1358
1359
/* Cancel any pending policy->update work before freeing the policy. */
1360
cancel_work_sync(&policy->update);
1361
1362
if (policy->max_freq_req) {
1363
/*
1364
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1365
* notification, since CPUFREQ_CREATE_POLICY notification was
1366
* sent after adding max_freq_req earlier.
1367
*/
1368
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1369
CPUFREQ_REMOVE_POLICY, policy);
1370
freq_qos_remove_request(policy->max_freq_req);
1371
}
1372
1373
freq_qos_remove_request(policy->min_freq_req);
1374
kfree(policy->min_freq_req);
1375
1376
cpufreq_policy_put_kobj(policy);
1377
free_cpumask_var(policy->real_cpus);
1378
free_cpumask_var(policy->related_cpus);
1379
free_cpumask_var(policy->cpus);
1380
kfree(policy);
1381
}
1382
1383
static int cpufreq_policy_online(struct cpufreq_policy *policy,
1384
unsigned int cpu, bool new_policy)
1385
{
1386
unsigned long flags;
1387
unsigned int j;
1388
int ret;
1389
1390
guard(cpufreq_policy_write)(policy);
1391
1392
policy->cpu = cpu;
1393
policy->governor = NULL;
1394
1395
if (!new_policy && cpufreq_driver->online) {
1396
/* Recover policy->cpus using related_cpus */
1397
cpumask_copy(policy->cpus, policy->related_cpus);
1398
1399
ret = cpufreq_driver->online(policy);
1400
if (ret) {
1401
pr_debug("%s: %d: initialization failed\n", __func__,
1402
__LINE__);
1403
goto out_exit_policy;
1404
}
1405
} else {
1406
cpumask_copy(policy->cpus, cpumask_of(cpu));
1407
1408
/*
1409
* Call driver. From then on the cpufreq must be able
1410
* to accept all calls to ->verify and ->setpolicy for this CPU.
1411
*/
1412
ret = cpufreq_driver->init(policy);
1413
if (ret) {
1414
pr_debug("%s: %d: initialization failed\n", __func__,
1415
__LINE__);
1416
goto out_clear_policy;
1417
}
1418
1419
/*
1420
* The initialization has succeeded and the policy is online.
1421
* If there is a problem with its frequency table, take it
1422
* offline and drop it.
1423
*/
1424
ret = cpufreq_table_validate_and_sort(policy);
1425
if (ret)
1426
goto out_offline_policy;
1427
1428
/* related_cpus should at least include policy->cpus. */
1429
cpumask_copy(policy->related_cpus, policy->cpus);
1430
}
1431
1432
/*
1433
* The affected CPUs must always be the ones that are online. We aren't
* managing offline CPUs here.
1435
*/
1436
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1437
1438
if (new_policy) {
1439
for_each_cpu(j, policy->related_cpus) {
1440
per_cpu(cpufreq_cpu_data, j) = policy;
1441
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1442
}
1443
1444
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1445
GFP_KERNEL);
1446
if (!policy->min_freq_req) {
1447
ret = -ENOMEM;
1448
goto out_destroy_policy;
1449
}
1450
1451
ret = freq_qos_add_request(&policy->constraints,
1452
policy->min_freq_req, FREQ_QOS_MIN,
1453
FREQ_QOS_MIN_DEFAULT_VALUE);
1454
if (ret < 0) {
1455
/*
1456
* So we don't call freq_qos_remove_request() for an
1457
* uninitialized request.
1458
*/
1459
kfree(policy->min_freq_req);
1460
policy->min_freq_req = NULL;
1461
goto out_destroy_policy;
1462
}
1463
1464
/*
1465
* This must be initialized right here to avoid calling
1466
* freq_qos_remove_request() on uninitialized request in case
1467
* of errors.
1468
*/
1469
policy->max_freq_req = policy->min_freq_req + 1;
1470
1471
ret = freq_qos_add_request(&policy->constraints,
1472
policy->max_freq_req, FREQ_QOS_MAX,
1473
FREQ_QOS_MAX_DEFAULT_VALUE);
1474
if (ret < 0) {
1475
policy->max_freq_req = NULL;
1476
goto out_destroy_policy;
1477
}
1478
1479
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1480
CPUFREQ_CREATE_POLICY, policy);
1481
} else {
1482
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
1483
if (ret < 0)
1484
goto out_destroy_policy;
1485
}
1486
1487
if (cpufreq_driver->get && has_target()) {
1488
policy->cur = cpufreq_driver->get(policy->cpu);
1489
if (!policy->cur) {
1490
ret = -EIO;
1491
pr_err("%s: ->get() failed\n", __func__);
1492
goto out_destroy_policy;
1493
}
1494
}
1495
1496
/*
1497
* Sometimes boot loaders set the CPU frequency to a value outside of the
* frequency table known to the cpufreq core. In such cases the CPU might
* be unstable if it has to run at that frequency for a long time, so it
* is better to set it to a frequency listed in the table. This also keeps
* cpufreq stats consistent, as cpufreq-stats would otherwise fail to
* register because the current CPU frequency is not found in the table.
*
* Because we don't want this change to affect the boot process badly, we
* go for the next frequency which is >= policy->cur ('cur' must be set by
* now, otherwise we would end up setting the frequency to the lowest table
* entry, as 'cur' is initialized to zero).
*
* We pass "policy->cur - 1" as the target frequency, otherwise
* __cpufreq_driver_target() would simply fail, as policy->cur would be
* equal to the target frequency.
1513
*/
1514
if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1515
&& has_target()) {
1516
unsigned int old_freq = policy->cur;
1517
1518
/* Are we running at unknown frequency ? */
1519
ret = cpufreq_frequency_table_get_index(policy, old_freq);
1520
if (ret == -EINVAL) {
1521
ret = __cpufreq_driver_target(policy, old_freq - 1,
1522
CPUFREQ_RELATION_L);
1523
1524
/*
1525
* Reaching this point a few seconds after boot does not
* mean that the system will remain stable at the "unknown"
* frequency for a longer duration. Hence, a BUG_ON().
1528
*/
1529
BUG_ON(ret);
1530
pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
1531
__func__, policy->cpu, old_freq, policy->cur);
1532
}
1533
}
1534
1535
if (new_policy) {
1536
ret = cpufreq_add_dev_interface(policy);
1537
if (ret)
1538
goto out_destroy_policy;
1539
1540
cpufreq_stats_create_table(policy);
1541
1542
write_lock_irqsave(&cpufreq_driver_lock, flags);
1543
list_add(&policy->policy_list, &cpufreq_policy_list);
1544
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1545
1546
/*
1547
* Register with the energy model before
1548
* em_rebuild_sched_domains() is called, which will result
1549
* in rebuilding of the sched domains, which should only be done
1550
* once the energy model is properly initialized for the policy
1551
* first.
1552
*
1553
* Also, this should be called before the policy is registered
1554
* with cooling framework.
1555
*/
1556
if (cpufreq_driver->register_em)
1557
cpufreq_driver->register_em(policy);
1558
}
1559
1560
ret = cpufreq_init_policy(policy);
1561
if (ret) {
1562
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1563
__func__, cpu, ret);
1564
goto out_destroy_policy;
1565
}
1566
1567
return 0;
1568
1569
out_destroy_policy:
1570
for_each_cpu(j, policy->real_cpus)
1571
remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1572
1573
out_offline_policy:
1574
if (cpufreq_driver->offline)
1575
cpufreq_driver->offline(policy);
1576
1577
out_exit_policy:
1578
if (cpufreq_driver->exit)
1579
cpufreq_driver->exit(policy);
1580
1581
out_clear_policy:
1582
cpumask_clear(policy->cpus);
1583
1584
return ret;
1585
}
1586
1587
static int cpufreq_online(unsigned int cpu)
1588
{
1589
struct cpufreq_policy *policy;
1590
bool new_policy;
1591
int ret;
1592
1593
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1594
1595
/* Check if this CPU already has a policy to manage it */
1596
policy = per_cpu(cpufreq_cpu_data, cpu);
1597
if (policy) {
1598
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1599
if (!policy_is_inactive(policy))
1600
return cpufreq_add_policy_cpu(policy, cpu);
1601
1602
/* This is the only online CPU for the policy. Start over. */
1603
new_policy = false;
1604
} else {
1605
new_policy = true;
1606
policy = cpufreq_policy_alloc(cpu);
1607
if (!policy)
1608
return -ENOMEM;
1609
}
1610
1611
ret = cpufreq_policy_online(policy, cpu, new_policy);
1612
if (ret) {
1613
cpufreq_policy_free(policy);
1614
return ret;
1615
}
1616
1617
kobject_uevent(&policy->kobj, KOBJ_ADD);
1618
1619
/* Callback for handling stuff after policy is ready */
1620
if (cpufreq_driver->ready)
1621
cpufreq_driver->ready(policy);
1622
1623
/* Register cpufreq cooling only for a new policy */
1624
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
1625
policy->cdev = of_cpufreq_cooling_register(policy);
1626
1627
/*
1628
* Let the per-policy boost flag mirror the cpufreq_driver boost during
1629
* initialization for a new policy. For an existing policy, maintain the
1630
* previous boost value unless global boost is disabled.
1631
*/
1632
if (cpufreq_driver->set_boost && policy->boost_supported &&
1633
(new_policy || !cpufreq_boost_enabled())) {
1634
ret = policy_set_boost(policy, cpufreq_boost_enabled());
1635
if (ret) {
1636
/* If the set_boost fails, the online operation is not affected */
1637
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
1638
str_enable_disable(cpufreq_boost_enabled()));
1639
}
1640
}
1641
1642
pr_debug("initialization complete\n");
1643
1644
return 0;
1645
}
1646
1647
/**
1648
* cpufreq_add_dev - the cpufreq interface for a CPU device.
1649
* @dev: CPU device.
1650
* @sif: Subsystem interface structure pointer (not used)
1651
*/
1652
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1653
{
1654
struct cpufreq_policy *policy;
1655
unsigned cpu = dev->id;
1656
int ret;
1657
1658
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1659
1660
if (cpu_online(cpu)) {
1661
ret = cpufreq_online(cpu);
1662
if (ret)
1663
return ret;
1664
}
1665
1666
/* Create sysfs link on CPU registration */
1667
policy = per_cpu(cpufreq_cpu_data, cpu);
1668
if (policy)
1669
add_cpu_dev_symlink(policy, cpu, dev);
1670
1671
return 0;
1672
}
1673
1674
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1675
{
1676
int ret;
1677
1678
if (has_target())
1679
cpufreq_stop_governor(policy);
1680
1681
cpumask_clear_cpu(cpu, policy->cpus);
1682
1683
if (!policy_is_inactive(policy)) {
1684
/* Nominate a new CPU if necessary. */
1685
if (cpu == policy->cpu)
1686
policy->cpu = cpumask_any(policy->cpus);
1687
1688
/* Start the governor again for the active policy. */
1689
if (has_target()) {
1690
ret = cpufreq_start_governor(policy);
1691
if (ret)
1692
pr_err("%s: Failed to start governor\n", __func__);
1693
}
1694
1695
return;
1696
}
1697
1698
if (has_target()) {
1699
strscpy(policy->last_governor, policy->governor->name,
1700
CPUFREQ_NAME_LEN);
1701
cpufreq_exit_governor(policy);
1702
} else {
1703
policy->last_policy = policy->policy;
1704
}
1705
1706
/*
1707
* Perform the ->offline() during light-weight tear-down, as
1708
* that allows fast recovery when the CPU comes back.
1709
*/
1710
if (cpufreq_driver->offline) {
1711
cpufreq_driver->offline(policy);
1712
return;
1713
}
1714
1715
if (cpufreq_driver->exit)
1716
cpufreq_driver->exit(policy);
1717
1718
policy->freq_table = NULL;
1719
}
1720
1721
static int cpufreq_offline(unsigned int cpu)
1722
{
1723
struct cpufreq_policy *policy;
1724
1725
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1726
1727
policy = cpufreq_cpu_get_raw(cpu);
1728
if (!policy) {
1729
pr_debug("%s: No cpu_data found\n", __func__);
1730
return 0;
1731
}
1732
1733
guard(cpufreq_policy_write)(policy);
1734
1735
__cpufreq_offline(cpu, policy);
1736
1737
return 0;
1738
}
1739
1740
/*
1741
* cpufreq_remove_dev - remove a CPU device
1742
*
1743
* Removes the cpufreq interface for a CPU device.
1744
*/
1745
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1746
{
1747
unsigned int cpu = dev->id;
1748
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1749
1750
if (!policy)
1751
return;
1752
1753
scoped_guard(cpufreq_policy_write, policy) {
1754
if (cpu_online(cpu))
1755
__cpufreq_offline(cpu, policy);
1756
1757
remove_cpu_dev_symlink(policy, cpu, dev);
1758
1759
if (!cpumask_empty(policy->real_cpus))
1760
return;
1761
1762
/*
1763
* Unregister cpufreq cooling once all the CPUs of the policy
1764
* are removed.
1765
*/
1766
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1767
cpufreq_cooling_unregister(policy->cdev);
1768
policy->cdev = NULL;
1769
}
1770
1771
/* We did light-weight exit earlier, do full tear down now */
1772
if (cpufreq_driver->offline && cpufreq_driver->exit)
1773
cpufreq_driver->exit(policy);
1774
}
1775
1776
cpufreq_policy_free(policy);
1777
}
1778
1779
/**
1780
* cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1781
* @policy: Policy managing CPUs.
1782
* @new_freq: New CPU frequency.
1783
*
1784
* Adjust to the current frequency first and clean up later by either calling
1785
* cpufreq_update_policy(), or scheduling handle_update().
1786
*/
1787
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1788
unsigned int new_freq)
1789
{
1790
struct cpufreq_freqs freqs;
1791
1792
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1793
policy->cur, new_freq);
1794
1795
freqs.old = policy->cur;
1796
freqs.new = new_freq;
1797
1798
cpufreq_freq_transition_begin(policy, &freqs);
1799
cpufreq_freq_transition_end(policy, &freqs, 0);
1800
}
1801
1802
static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1803
{
1804
unsigned int new_freq;
1805
1806
if (!cpufreq_driver->get)
1807
return 0;
1808
1809
new_freq = cpufreq_driver->get(policy->cpu);
1810
if (!new_freq)
1811
return 0;
1812
1813
/*
1814
* If fast frequency switching is used with the given policy, the check
1815
* against policy->cur is pointless, so skip it in that case.
1816
*/
1817
if (policy->fast_switch_enabled || !has_target())
1818
return new_freq;
1819
1820
if (policy->cur != new_freq) {
1821
/*
1822
* For some platforms, the frequency returned by hardware may be
1823
* slightly different from what is provided in the frequency
1824
* table, for example hardware may return 499 MHz instead of 500
1825
* MHz. In such cases it is better to avoid getting into
1826
* unnecessary frequency updates.
1827
*/
1828
if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
1829
return policy->cur;
1830
1831
cpufreq_out_of_sync(policy, new_freq);
1832
if (update)
1833
schedule_work(&policy->update);
1834
}
1835
1836
return new_freq;
1837
}
1838
1839
/**
1840
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1841
* @cpu: CPU number
1842
*
1843
* This is the last known freq, without actually getting it from the driver.
1844
* Return value will be same as what is shown in scaling_cur_freq in sysfs.
1845
*/
1846
unsigned int cpufreq_quick_get(unsigned int cpu)
1847
{
1848
unsigned long flags;
1849
1850
read_lock_irqsave(&cpufreq_driver_lock, flags);
1851
1852
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1853
unsigned int ret_freq = cpufreq_driver->get(cpu);
1854
1855
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1856
1857
return ret_freq;
1858
}
1859
1860
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1861
1862
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1863
if (policy)
1864
return policy->cur;
1865
1866
return 0;
1867
}
1868
EXPORT_SYMBOL(cpufreq_quick_get);
1869
1870
/**
1871
* cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1872
* @cpu: CPU number
1873
*
1874
* Just return the max possible frequency for a given CPU.
1875
*/
1876
unsigned int cpufreq_quick_get_max(unsigned int cpu)
1877
{
1878
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1879
if (policy)
1880
return policy->max;
1881
1882
return 0;
1883
}
1884
EXPORT_SYMBOL(cpufreq_quick_get_max);
1885
1886
/**
1887
* cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1888
* @cpu: CPU number
1889
*
1890
* The default return value is the max_freq field of cpuinfo.
1891
*/
1892
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1893
{
1894
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1895
if (policy)
1896
return policy->cpuinfo.max_freq;
1897
1898
return 0;
1899
}
1900
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1901
1902
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1903
{
1904
if (unlikely(policy_is_inactive(policy)))
1905
return 0;
1906
1907
return cpufreq_verify_current_freq(policy, true);
1908
}
1909
1910
/**
1911
* cpufreq_get - get the current CPU frequency (in kHz)
1912
* @cpu: CPU number
1913
*
1914
* Get the current CPU frequency as reported by the cpufreq driver.
1915
*/
1916
unsigned int cpufreq_get(unsigned int cpu)
1917
{
1918
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1919
if (!policy)
1920
return 0;
1921
1922
guard(cpufreq_policy_read)(policy);
1923
1924
return __cpufreq_get(policy);
1925
}
1926
EXPORT_SYMBOL(cpufreq_get);
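/*
 * Illustrative sketch (not part of this file): unlike cpufreq_quick_get(),
 * cpufreq_get() invokes the driver's ->get() callback (and may resync the
 * policy with the hardware), so it takes the policy lock and can sleep.
 * A hypothetical caller wanting the frequency actually programmed in
 * hardware might do:
 *
 *	unsigned int khz = cpufreq_get(cpu);
 *
 *	if (khz)
 *		pr_info("CPU%u is running at %u kHz\n", cpu, khz);
 */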
1927
1928
static struct subsys_interface cpufreq_interface = {
1929
.name = "cpufreq",
1930
.subsys = &cpu_subsys,
1931
.add_dev = cpufreq_add_dev,
1932
.remove_dev = cpufreq_remove_dev,
1933
};
1934
1935
/*
1936
* In case the platform wants a specific frequency to be configured
1937
* during suspend.
1938
*/
1939
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1940
{
1941
int ret;
1942
1943
if (!policy->suspend_freq) {
1944
pr_debug("%s: suspend_freq not defined\n", __func__);
1945
return 0;
1946
}
1947
1948
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1949
policy->suspend_freq);
1950
1951
ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1952
CPUFREQ_RELATION_H);
1953
if (ret)
1954
pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1955
__func__, policy->suspend_freq, ret);
1956
1957
return ret;
1958
}
1959
EXPORT_SYMBOL(cpufreq_generic_suspend);
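/*
 * Illustrative sketch (not part of this file): a driver that wants a fixed
 * frequency across suspend can set policy->suspend_freq from its ->init()
 * callback and point ->suspend at the generic helper above. The names
 * example_cpufreq_init and example_driver are made up for illustration.
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		policy->suspend_freq = policy->cpuinfo.max_freq;
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver example_driver = {
 *		...
 *		.suspend = cpufreq_generic_suspend,
 *	};
 */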
1960
1961
/**
1962
* cpufreq_suspend() - Suspend CPUFreq governors.
1963
*
1964
* Called during system-wide suspend/hibernate cycles to suspend governors,
1965
* as some platforms can't change the frequency after this point in the
1966
* suspend cycle, because the devices they use for changing the frequency
1967
* (i2c controllers, regulators, etc.) are suspended shortly afterwards.
1968
*/
1969
void cpufreq_suspend(void)
1970
{
1971
struct cpufreq_policy *policy;
1972
1973
if (!cpufreq_driver)
1974
return;
1975
1976
if (!has_target() && !cpufreq_driver->suspend)
1977
goto suspend;
1978
1979
pr_debug("%s: Suspending Governors\n", __func__);
1980
1981
for_each_active_policy(policy) {
1982
if (has_target()) {
1983
scoped_guard(cpufreq_policy_write, policy) {
1984
cpufreq_stop_governor(policy);
1985
}
1986
}
1987
1988
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1989
pr_err("%s: Failed to suspend driver: %s\n", __func__,
1990
cpufreq_driver->name);
1991
}
1992
1993
suspend:
1994
cpufreq_suspended = true;
1995
}
1996
1997
/**
1998
* cpufreq_resume() - Resume CPUFreq governors.
1999
*
2000
* Called during system-wide suspend/hibernate cycles to resume governors that
2001
* are suspended with cpufreq_suspend().
2002
*/
2003
void cpufreq_resume(void)
2004
{
2005
struct cpufreq_policy *policy;
2006
int ret;
2007
2008
if (!cpufreq_driver)
2009
return;
2010
2011
if (unlikely(!cpufreq_suspended))
2012
return;
2013
2014
cpufreq_suspended = false;
2015
2016
if (!has_target() && !cpufreq_driver->resume)
2017
return;
2018
2019
pr_debug("%s: Resuming Governors\n", __func__);
2020
2021
for_each_active_policy(policy) {
2022
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
2023
pr_err("%s: Failed to resume driver: %s\n", __func__,
2024
cpufreq_driver->name);
2025
} else if (has_target()) {
2026
scoped_guard(cpufreq_policy_write, policy) {
2027
ret = cpufreq_start_governor(policy);
2028
}
2029
2030
if (ret)
2031
pr_err("%s: Failed to start governor for CPU%u's policy\n",
2032
__func__, policy->cpu);
2033
}
2034
}
2035
}
2036
2037
/**
2038
* cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
2039
* @flags: Flags to test against the current cpufreq driver's flags.
2040
*
2041
* Assumes that the driver is there, so callers must ensure that this is the
2042
* case.
2043
*/
2044
bool cpufreq_driver_test_flags(u16 flags)
2045
{
2046
return !!(cpufreq_driver->flags & flags);
2047
}
2048
2049
/**
2050
* cpufreq_get_current_driver - Return the current driver's name.
2051
*
2052
* Return the name string of the currently registered cpufreq driver or NULL if
2053
* none.
2054
*/
2055
const char *cpufreq_get_current_driver(void)
2056
{
2057
if (cpufreq_driver)
2058
return cpufreq_driver->name;
2059
2060
return NULL;
2061
}
2062
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
2063
2064
/**
2065
* cpufreq_get_driver_data - Return current driver data.
2066
*
2067
* Return the private data of the currently registered cpufreq driver, or NULL
2068
* if no cpufreq driver has been registered.
2069
*/
2070
void *cpufreq_get_driver_data(void)
2071
{
2072
if (cpufreq_driver)
2073
return cpufreq_driver->driver_data;
2074
2075
return NULL;
2076
}
2077
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
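/*
 * Illustrative sketch (not part of this file): a driver can stash private
 * data in its struct cpufreq_driver and fetch it later from code that has
 * no policy pointer at hand. The example_data structure and example_driver
 * are made up for illustration.
 *
 *	static struct example_data pdata;
 *
 *	static struct cpufreq_driver example_driver = {
 *		...
 *		.driver_data = &pdata,
 *	};
 *
 *	...
 *	struct example_data *d = cpufreq_get_driver_data();
 *
 *	if (d)
 *		...
 */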
2078
2079
/*********************************************************************
2080
* NOTIFIER LISTS INTERFACE *
2081
*********************************************************************/
2082
2083
/**
2084
* cpufreq_register_notifier - Register a notifier with cpufreq.
2085
* @nb: notifier function to register.
2086
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2087
*
2088
* Add a notifier to one of two lists: either a list of notifiers that run on
2089
* clock rate changes (once before and once after every transition), or a list
2090
* of notifiers that run on cpufreq policy changes.
2091
*
2092
* This function may sleep and it has the same return values as
2093
* blocking_notifier_chain_register().
2094
*/
2095
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2096
{
2097
int ret;
2098
2099
if (cpufreq_disabled())
2100
return -EINVAL;
2101
2102
switch (list) {
2103
case CPUFREQ_TRANSITION_NOTIFIER:
2104
mutex_lock(&cpufreq_fast_switch_lock);
2105
2106
if (cpufreq_fast_switch_count > 0) {
2107
mutex_unlock(&cpufreq_fast_switch_lock);
2108
return -EBUSY;
2109
}
2110
ret = srcu_notifier_chain_register(
2111
&cpufreq_transition_notifier_list, nb);
2112
if (!ret)
2113
cpufreq_fast_switch_count--;
2114
2115
mutex_unlock(&cpufreq_fast_switch_lock);
2116
break;
2117
case CPUFREQ_POLICY_NOTIFIER:
2118
ret = blocking_notifier_chain_register(
2119
&cpufreq_policy_notifier_list, nb);
2120
break;
2121
default:
2122
ret = -EINVAL;
2123
}
2124
2125
return ret;
2126
}
2127
EXPORT_SYMBOL(cpufreq_register_notifier);
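/*
 * Illustrative sketch (not part of this file): registering a transition
 * notifier that runs around every frequency change. The names example_nb
 * and example_trans_cb are made up for illustration; registration fails
 * with -EBUSY if fast frequency switching is in use.
 *
 *	static int example_trans_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("freq changed: %u -> %u kHz\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_trans_cb,
 *	};
 *
 *	...
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */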
2128
2129
/**
2130
* cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2131
* @nb: notifier block to be unregistered.
2132
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2133
*
2134
* Remove a notifier from one of the cpufreq notifier lists.
2135
*
2136
* This function may sleep and it has the same return values as
2137
* blocking_notifier_chain_unregister().
2138
*/
2139
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2140
{
2141
int ret;
2142
2143
if (cpufreq_disabled())
2144
return -EINVAL;
2145
2146
switch (list) {
2147
case CPUFREQ_TRANSITION_NOTIFIER:
2148
mutex_lock(&cpufreq_fast_switch_lock);
2149
2150
ret = srcu_notifier_chain_unregister(
2151
&cpufreq_transition_notifier_list, nb);
2152
if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2153
cpufreq_fast_switch_count++;
2154
2155
mutex_unlock(&cpufreq_fast_switch_lock);
2156
break;
2157
case CPUFREQ_POLICY_NOTIFIER:
2158
ret = blocking_notifier_chain_unregister(
2159
&cpufreq_policy_notifier_list, nb);
2160
break;
2161
default:
2162
ret = -EINVAL;
2163
}
2164
2165
return ret;
2166
}
2167
EXPORT_SYMBOL(cpufreq_unregister_notifier);
2168
2169
2170
/*********************************************************************
2171
* GOVERNORS *
2172
*********************************************************************/
2173
2174
/**
2175
* cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2176
* @policy: cpufreq policy to switch the frequency for.
2177
* @target_freq: New frequency to set (may be approximate).
2178
*
2179
* Carry out a fast frequency switch without sleeping.
2180
*
2181
* The driver's ->fast_switch() callback invoked by this function must be
2182
* suitable for being called from within RCU-sched read-side critical sections
2183
* and it is expected to select the minimum available frequency greater than or
2184
* equal to @target_freq (CPUFREQ_RELATION_L).
2185
*
2186
* This function must not be called if policy->fast_switch_enabled is unset.
2187
*
2188
* Governors calling this function must guarantee that it will never be invoked
2189
* twice in parallel for the same policy and that it will never be called in
2190
* parallel with either ->target() or ->target_index() for the same policy.
2191
*
2192
* Returns the actual frequency set for the CPU.
2193
*
2194
* If 0 is returned by the driver's ->fast_switch() callback to indicate an
2195
* error condition, the hardware configuration must be preserved.
2196
*/
2197
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2198
unsigned int target_freq)
2199
{
2200
unsigned int freq;
2201
int cpu;
2202
2203
target_freq = clamp_val(target_freq, policy->min, policy->max);
2204
freq = cpufreq_driver->fast_switch(policy, target_freq);
2205
2206
if (!freq)
2207
return 0;
2208
2209
policy->cur = freq;
2210
arch_set_freq_scale(policy->related_cpus, freq,
2211
arch_scale_freq_ref(policy->cpu));
2212
cpufreq_stats_record_transition(policy, freq);
2213
2214
if (trace_cpu_frequency_enabled()) {
2215
for_each_cpu(cpu, policy->cpus)
2216
trace_cpu_frequency(freq, cpu);
2217
}
2218
2219
return freq;
2220
}
2221
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
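/*
 * Illustrative sketch (not part of this file): how a schedutil-like
 * governor might use cpufreq_driver_fast_switch() from its frequency
 * update path, assuming it has already enabled fast switching for the
 * policy. The name example_fast_update is made up for illustration.
 *
 *	static void example_fast_update(struct cpufreq_policy *policy,
 *					unsigned int next_freq)
 *	{
 *		if (!policy->fast_switch_enabled)
 *			return;
 *
 *		if (!cpufreq_driver_fast_switch(policy, next_freq))
 *			pr_debug("fast switch failed, frequency unchanged\n");
 *	}
 */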
2222
2223
/**
2224
* cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2225
* @cpu: Target CPU.
2226
* @min_perf: Minimum (required) performance level (units of @capacity).
2227
* @target_perf: Target (desired) performance level (units of @capacity).
2228
* @capacity: Capacity of the target CPU.
2229
*
2230
* Carry out a fast performance level switch of @cpu without sleeping.
2231
*
2232
* The driver's ->adjust_perf() callback invoked by this function must be
2233
* suitable for being called from within RCU-sched read-side critical sections
2234
* and it is expected to select a suitable performance level equal to or above
2235
* @min_perf and preferably equal to or below @target_perf.
2236
*
2237
* This function must not be called if policy->fast_switch_enabled is unset.
2238
*
2239
* Governors calling this function must guarantee that it will never be invoked
2240
* twice in parallel for the same CPU and that it will never be called in
2241
* parallel with either ->target() or ->target_index() or ->fast_switch() for
2242
* the same CPU.
2243
*/
2244
void cpufreq_driver_adjust_perf(unsigned int cpu,
2245
unsigned long min_perf,
2246
unsigned long target_perf,
2247
unsigned long capacity)
2248
{
2249
cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2250
}
2251
2252
/**
2253
* cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2254
*
2255
* Return 'true' if the ->adjust_perf callback is present for the
2256
* current driver or 'false' otherwise.
2257
*/
2258
bool cpufreq_driver_has_adjust_perf(void)
2259
{
2260
return !!cpufreq_driver->adjust_perf;
2261
}
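/*
 * Illustrative sketch (not part of this file): a governor would typically
 * probe cpufreq_driver_has_adjust_perf() once and then use the "direct
 * fast switch" path when available. The name example_update_cpu is made
 * up for illustration; min, target and capacity follow the units described
 * above.
 *
 *	static void example_update_cpu(unsigned int cpu, unsigned long min,
 *				       unsigned long target,
 *				       unsigned long capacity)
 *	{
 *		if (cpufreq_driver_has_adjust_perf())
 *			cpufreq_driver_adjust_perf(cpu, min, target, capacity);
 *	}
 */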
2262
2263
/* Must set freqs->new to intermediate frequency */
2264
static int __target_intermediate(struct cpufreq_policy *policy,
2265
struct cpufreq_freqs *freqs, int index)
2266
{
2267
int ret;
2268
2269
freqs->new = cpufreq_driver->get_intermediate(policy, index);
2270
2271
/* We don't need to switch to intermediate freq */
2272
if (!freqs->new)
2273
return 0;
2274
2275
pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2276
__func__, policy->cpu, freqs->old, freqs->new);
2277
2278
cpufreq_freq_transition_begin(policy, freqs);
2279
ret = cpufreq_driver->target_intermediate(policy, index);
2280
cpufreq_freq_transition_end(policy, freqs, ret);
2281
2282
if (ret)
2283
pr_err("%s: Failed to change to intermediate frequency: %d\n",
2284
__func__, ret);
2285
2286
return ret;
2287
}
2288
2289
static int __target_index(struct cpufreq_policy *policy, int index)
2290
{
2291
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2292
unsigned int restore_freq, intermediate_freq = 0;
2293
unsigned int newfreq = policy->freq_table[index].frequency;
2294
int retval = -EINVAL;
2295
bool notify;
2296
2297
if (newfreq == policy->cur)
2298
return 0;
2299
2300
/* Save last value to restore later on errors */
2301
restore_freq = policy->cur;
2302
2303
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2304
if (notify) {
2305
/* Handle switching to intermediate frequency */
2306
if (cpufreq_driver->get_intermediate) {
2307
retval = __target_intermediate(policy, &freqs, index);
2308
if (retval)
2309
return retval;
2310
2311
intermediate_freq = freqs.new;
2312
/* Set old freq to intermediate */
2313
if (intermediate_freq)
2314
freqs.old = freqs.new;
2315
}
2316
2317
freqs.new = newfreq;
2318
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2319
__func__, policy->cpu, freqs.old, freqs.new);
2320
2321
cpufreq_freq_transition_begin(policy, &freqs);
2322
}
2323
2324
retval = cpufreq_driver->target_index(policy, index);
2325
if (retval)
2326
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2327
retval);
2328
2329
if (notify) {
2330
cpufreq_freq_transition_end(policy, &freqs, retval);
2331
2332
/*
2333
* Failed after setting to intermediate freq? Driver should have
2334
* reverted back to initial frequency and so should we. Check
2335
* here for intermediate_freq instead of get_intermediate, in
2336
* case we haven't switched to intermediate freq at all.
2337
*/
2338
if (unlikely(retval && intermediate_freq)) {
2339
freqs.old = intermediate_freq;
2340
freqs.new = restore_freq;
2341
cpufreq_freq_transition_begin(policy, &freqs);
2342
cpufreq_freq_transition_end(policy, &freqs, 0);
2343
}
2344
}
2345
2346
return retval;
2347
}
2348
2349
int __cpufreq_driver_target(struct cpufreq_policy *policy,
2350
unsigned int target_freq,
2351
unsigned int relation)
2352
{
2353
unsigned int old_target_freq = target_freq;
2354
2355
if (cpufreq_disabled())
2356
return -ENODEV;
2357
2358
target_freq = __resolve_freq(policy, target_freq, policy->min,
2359
policy->max, relation);
2360
2361
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2362
policy->cpu, target_freq, relation, old_target_freq);
2363
2364
/*
2365
* This might look like a redundant call, as we check it again after
2366
* finding the index. But it is left intentionally for cases where exactly
2367
* the same frequency is requested again, so that we can save a few
2368
* function calls.
2369
*/
2370
if (target_freq == policy->cur &&
2371
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2372
return 0;
2373
2374
if (cpufreq_driver->target) {
2375
/*
2376
* If the driver hasn't set up a single inefficient frequency,
2377
* it's unlikely it knows how to decode CPUFREQ_RELATION_E.
2378
*/
2379
if (!policy->efficiencies_available)
2380
relation &= ~CPUFREQ_RELATION_E;
2381
2382
return cpufreq_driver->target(policy, target_freq, relation);
2383
}
2384
2385
if (!cpufreq_driver->target_index)
2386
return -EINVAL;
2387
2388
return __target_index(policy, policy->cached_resolved_idx);
2389
}
2390
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2391
2392
int cpufreq_driver_target(struct cpufreq_policy *policy,
2393
unsigned int target_freq,
2394
unsigned int relation)
2395
{
2396
guard(cpufreq_policy_write)(policy);
2397
2398
return __cpufreq_driver_target(policy, target_freq, relation);
2399
}
2400
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
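/*
 * Illustrative sketch (not part of this file): a caller that does not
 * already hold the policy rwsem (cpufreq_driver_target() acquires the
 * write guard itself) might request the lowest available frequency at or
 * above a given target like this; target_khz is a made-up placeholder.
 *
 *	int ret = cpufreq_driver_target(policy, target_khz,
 *					CPUFREQ_RELATION_L);
 *	if (ret)
 *		pr_warn("failed to set %u kHz: %d\n", target_khz, ret);
 */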
2401
2402
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2403
{
2404
return NULL;
2405
}
2406
2407
static int cpufreq_init_governor(struct cpufreq_policy *policy)
2408
{
2409
int ret;
2410
2411
/* Don't start any governor operations if we are entering suspend */
2412
if (cpufreq_suspended)
2413
return 0;
2414
/*
2415
* The governor might not have been initialized here if an ACPI _PPC
2416
* change notification occurred, so check for it.
2417
*/
2418
if (!policy->governor)
2419
return -EINVAL;
2420
2421
/* Platform doesn't want dynamic frequency switching? */
2422
if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2423
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2424
struct cpufreq_governor *gov = cpufreq_fallback_governor();
2425
2426
if (gov) {
2427
pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2428
policy->governor->name, gov->name);
2429
policy->governor = gov;
2430
} else {
2431
return -EINVAL;
2432
}
2433
}
2434
2435
if (!try_module_get(policy->governor->owner))
2436
return -EINVAL;
2437
2438
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2439
2440
if (policy->governor->init) {
2441
ret = policy->governor->init(policy);
2442
if (ret) {
2443
module_put(policy->governor->owner);
2444
return ret;
2445
}
2446
}
2447
2448
policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2449
2450
return 0;
2451
}
2452
2453
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2454
{
2455
if (cpufreq_suspended || !policy->governor)
2456
return;
2457
2458
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2459
2460
if (policy->governor->exit)
2461
policy->governor->exit(policy);
2462
2463
module_put(policy->governor->owner);
2464
}
2465
2466
int cpufreq_start_governor(struct cpufreq_policy *policy)
2467
{
2468
int ret;
2469
2470
if (cpufreq_suspended)
2471
return 0;
2472
2473
if (!policy->governor)
2474
return -EINVAL;
2475
2476
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2477
2478
cpufreq_verify_current_freq(policy, false);
2479
2480
if (policy->governor->start) {
2481
ret = policy->governor->start(policy);
2482
if (ret)
2483
return ret;
2484
}
2485
2486
if (policy->governor->limits)
2487
policy->governor->limits(policy);
2488
2489
return 0;
2490
}
2491
2492
void cpufreq_stop_governor(struct cpufreq_policy *policy)
2493
{
2494
if (cpufreq_suspended || !policy->governor)
2495
return;
2496
2497
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2498
2499
if (policy->governor->stop)
2500
policy->governor->stop(policy);
2501
}
2502
2503
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2504
{
2505
if (cpufreq_suspended || !policy->governor)
2506
return;
2507
2508
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2509
2510
if (policy->governor->limits)
2511
policy->governor->limits(policy);
2512
}
2513
2514
int cpufreq_register_governor(struct cpufreq_governor *governor)
2515
{
2516
int err;
2517
2518
if (!governor)
2519
return -EINVAL;
2520
2521
if (cpufreq_disabled())
2522
return -ENODEV;
2523
2524
mutex_lock(&cpufreq_governor_mutex);
2525
2526
err = -EBUSY;
2527
if (!find_governor(governor->name)) {
2528
err = 0;
2529
list_add(&governor->governor_list, &cpufreq_governor_list);
2530
}
2531
2532
mutex_unlock(&cpufreq_governor_mutex);
2533
return err;
2534
}
2535
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
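/*
 * Illustrative sketch (not part of this file): a minimal governor
 * registration. All "example_*" names are made up; a real governor would
 * provide meaningful ->start()/->stop()/->limits() callbacks.
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.start	= example_gov_start,
 *		.stop	= example_gov_stop,
 *		.limits	= example_gov_limits,
 *	};
 *
 *	static int __init example_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&example_governor);
 *	}
 *
 *	static void __exit example_gov_exit(void)
 *	{
 *		cpufreq_unregister_governor(&example_governor);
 *	}
 */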
2536
2537
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2538
{
2539
struct cpufreq_policy *policy;
2540
unsigned long flags;
2541
2542
if (!governor)
2543
return;
2544
2545
if (cpufreq_disabled())
2546
return;
2547
2548
/* clear last_governor for all inactive policies */
2549
read_lock_irqsave(&cpufreq_driver_lock, flags);
2550
for_each_inactive_policy(policy) {
2551
if (!strcmp(policy->last_governor, governor->name)) {
2552
policy->governor = NULL;
2553
strcpy(policy->last_governor, "\0");
2554
}
2555
}
2556
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2557
2558
mutex_lock(&cpufreq_governor_mutex);
2559
list_del(&governor->governor_list);
2560
mutex_unlock(&cpufreq_governor_mutex);
2561
}
2562
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2563
2564
2565
/*********************************************************************
2566
* POLICY INTERFACE *
2567
*********************************************************************/
2568
2569
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
2570
2571
/**
2572
* cpufreq_update_pressure() - Update cpufreq pressure for CPUs
2573
* @policy: cpufreq policy of the CPUs.
2574
*
2575
* Update the value of cpufreq pressure for all CPUs in the policy.
2576
*/
2577
static void cpufreq_update_pressure(struct cpufreq_policy *policy)
2578
{
2579
unsigned long max_capacity, capped_freq, pressure;
2580
u32 max_freq;
2581
int cpu;
2582
2583
cpu = cpumask_first(policy->related_cpus);
2584
max_freq = arch_scale_freq_ref(cpu);
2585
capped_freq = policy->max;
2586
2587
/*
2588
* Handle boost frequencies properly: they should simply clear the
2589
* cpufreq pressure value.
2590
*/
2591
if (max_freq <= capped_freq) {
2592
pressure = 0;
2593
} else {
2594
max_capacity = arch_scale_cpu_capacity(cpu);
2595
pressure = max_capacity -
2596
mult_frac(max_capacity, capped_freq, max_freq);
2597
}
2598
2599
for_each_cpu(cpu, policy->related_cpus)
2600
WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
2601
}
2602
2603
/**
2604
* cpufreq_set_policy - Modify cpufreq policy parameters.
2605
* @policy: Policy object to modify.
2606
* @new_gov: Policy governor pointer.
2607
* @new_pol: Policy value (for drivers with built-in governors).
2608
*
2609
* Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2610
* limits to be set for the policy, update @policy with the verified limits
2611
* values and either invoke the driver's ->setpolicy() callback (if present) or
2612
* carry out a governor update for @policy. That is, run the current governor's
2613
* ->limits() callback (if @new_gov points to the same object as the one in
2614
* @policy) or replace the governor for @policy with @new_gov.
2615
*
2616
* The cpuinfo part of @policy is not updated by this function.
2617
*/
2618
static int cpufreq_set_policy(struct cpufreq_policy *policy,
2619
struct cpufreq_governor *new_gov,
2620
unsigned int new_pol)
2621
{
2622
struct cpufreq_policy_data new_data;
2623
struct cpufreq_governor *old_gov;
2624
int ret;
2625
2626
memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2627
new_data.freq_table = policy->freq_table;
2628
new_data.cpu = policy->cpu;
2629
/*
2630
* The PM QoS framework collects all the requests from users and provides
2631
* us with the final aggregated value here.
2632
*/
2633
new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2634
new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2635
2636
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2637
new_data.cpu, new_data.min, new_data.max);
2638
2639
/*
2640
* Verify that the CPU speed can be set within these limits and make sure
2641
* that min <= max.
2642
*/
2643
ret = cpufreq_driver->verify(&new_data);
2644
if (ret)
2645
return ret;
2646
2647
/*
2648
* Resolve policy min/max to available frequencies. This ensures that the
2649
* resolved frequencies neither overshoot the requested maximum nor
2650
* undershoot the requested minimum.
2651
*
2652
* Avoid storing intermediate values in policy->max or policy->min and
2653
* compiler optimizations around them because they may be accessed
2654
* concurrently by cpufreq_driver_resolve_freq() during the update.
2655
*/
2656
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
2657
new_data.min, new_data.max,
2658
CPUFREQ_RELATION_H));
2659
new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
2660
new_data.max, CPUFREQ_RELATION_L);
2661
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
2662
2663
trace_cpu_frequency_limits(policy);
2664
2665
cpufreq_update_pressure(policy);
2666
2667
policy->cached_target_freq = UINT_MAX;
2668
2669
pr_debug("new min and max freqs are %u - %u kHz\n",
2670
policy->min, policy->max);
2671
2672
if (cpufreq_driver->setpolicy) {
2673
policy->policy = new_pol;
2674
pr_debug("setting range\n");
2675
return cpufreq_driver->setpolicy(policy);
2676
}
2677
2678
if (new_gov == policy->governor) {
2679
pr_debug("governor limits update\n");
2680
cpufreq_governor_limits(policy);
2681
return 0;
2682
}
2683
2684
pr_debug("governor switch\n");
2685
2686
/* save old, working values */
2687
old_gov = policy->governor;
2688
/* end old governor */
2689
if (old_gov) {
2690
cpufreq_stop_governor(policy);
2691
cpufreq_exit_governor(policy);
2692
}
2693
2694
/* start new governor */
2695
policy->governor = new_gov;
2696
ret = cpufreq_init_governor(policy);
2697
if (!ret) {
2698
ret = cpufreq_start_governor(policy);
2699
if (!ret) {
2700
pr_debug("governor change\n");
2701
return 0;
2702
}
2703
cpufreq_exit_governor(policy);
2704
}
2705
2706
/* new governor failed, so re-start old one */
2707
pr_debug("starting governor %s failed\n", policy->governor->name);
2708
if (old_gov) {
2709
policy->governor = old_gov;
2710
if (cpufreq_init_governor(policy)) {
2711
policy->governor = NULL;
2712
} else if (cpufreq_start_governor(policy)) {
2713
cpufreq_exit_governor(policy);
2714
policy->governor = NULL;
2715
}
2716
}
2717
2718
return ret;
2719
}
2720
2721
static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
2722
{
2723
guard(cpufreq_policy_write)(policy);
2724
2725
/*
2726
* BIOS might change freq behind our back
2727
* -> ask driver for current freq and notify governors about a change
2728
*/
2729
if (cpufreq_driver->get && has_target() &&
2730
(cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2731
return;
2732
2733
refresh_frequency_limits(policy);
2734
}
2735
2736
/**
2737
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2738
* @cpu: CPU to re-evaluate the policy for.
2739
*
2740
* Update the current frequency for the cpufreq policy of @cpu and use
2741
* cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2742
* evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2743
* for the policy in question, among other things.
2744
*/
2745
void cpufreq_update_policy(unsigned int cpu)
2746
{
2747
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
2748
if (!policy)
2749
return;
2750
2751
cpufreq_policy_refresh(policy);
2752
}
2753
EXPORT_SYMBOL(cpufreq_update_policy);
2754
2755
/**
2756
* cpufreq_update_limits - Update policy limits for a given CPU.
2757
* @cpu: CPU to update the policy limits for.
2758
*
2759
* Invoke the driver's ->update_limits callback if present or call
2760
* cpufreq_policy_refresh() for @cpu.
2761
*/
2762
void cpufreq_update_limits(unsigned int cpu)
2763
{
2764
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
2765
if (!policy)
2766
return;
2767
2768
if (cpufreq_driver->update_limits)
2769
cpufreq_driver->update_limits(policy);
2770
else
2771
cpufreq_policy_refresh(policy);
2772
}
2773
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
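/*
 * Illustrative sketch (not part of this file): platform code that learns
 * about a firmware-imposed limit change (e.g. from a notification handler
 * running in process context) can simply ask for the policy limits to be
 * re-evaluated:
 *
 *	cpufreq_update_limits(cpu);
 *
 * This ends up either in the driver's ->update_limits() callback or in
 * cpufreq_policy_refresh(), as described above.
 */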
2774
2775
/*********************************************************************
2776
* BOOST *
2777
*********************************************************************/
2778
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2779
{
2780
int ret;
2781
2782
if (!policy->freq_table)
2783
return -ENXIO;
2784
2785
ret = cpufreq_frequency_table_cpuinfo(policy);
2786
if (ret) {
2787
pr_err("%s: Policy frequency update failed\n", __func__);
2788
return ret;
2789
}
2790
2791
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2792
if (ret < 0)
2793
return ret;
2794
2795
return 0;
2796
}
2797
EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
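/*
 * Illustrative sketch (not part of this file): drivers whose boost
 * frequencies are simply extra entries in the frequency table can use the
 * generic helper above as their ->set_boost() callback. The name
 * example_driver is made up for illustration.
 *
 *	static struct cpufreq_driver example_driver = {
 *		...
 *		.set_boost = cpufreq_boost_set_sw,
 *	};
 */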
2798
2799
static int cpufreq_boost_trigger_state(int state)
2800
{
2801
struct cpufreq_policy *policy;
2802
unsigned long flags;
2803
int ret = 0;
2804
2805
/*
2806
* Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
2807
* make sure all policies are in sync with global boost flag.
2808
*/
2809
2810
write_lock_irqsave(&cpufreq_driver_lock, flags);
2811
cpufreq_driver->boost_enabled = state;
2812
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2813
2814
cpus_read_lock();
2815
for_each_active_policy(policy) {
2816
if (!policy->boost_supported)
2817
continue;
2818
2819
ret = policy_set_boost(policy, state);
2820
if (ret)
2821
goto err_reset_state;
2822
}
2823
cpus_read_unlock();
2824
2825
return 0;
2826
2827
err_reset_state:
2828
cpus_read_unlock();
2829
2830
write_lock_irqsave(&cpufreq_driver_lock, flags);
2831
cpufreq_driver->boost_enabled = !state;
2832
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2833
2834
pr_err("%s: Cannot %s BOOST\n",
2835
__func__, str_enable_disable(state));
2836
2837
return ret;
2838
}
2839
2840
static bool cpufreq_boost_supported(void)
2841
{
2842
return cpufreq_driver->set_boost;
2843
}
2844
2845
static int create_boost_sysfs_file(void)
2846
{
2847
int ret;
2848
2849
ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2850
if (ret)
2851
pr_err("%s: cannot register global BOOST sysfs file\n",
2852
__func__);
2853
2854
return ret;
2855
}
2856
2857
static void remove_boost_sysfs_file(void)
2858
{
2859
if (cpufreq_boost_supported())
2860
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2861
}
2862
2863
bool cpufreq_boost_enabled(void)
2864
{
2865
return cpufreq_driver->boost_enabled;
2866
}
2867
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2868
2869
/*********************************************************************
2870
* REGISTER / UNREGISTER CPUFREQ DRIVER *
2871
*********************************************************************/
2872
static enum cpuhp_state hp_online;
2873
2874
static int cpuhp_cpufreq_online(unsigned int cpu)
2875
{
2876
cpufreq_online(cpu);
2877
2878
return 0;
2879
}
2880
2881
static int cpuhp_cpufreq_offline(unsigned int cpu)
2882
{
2883
cpufreq_offline(cpu);
2884
2885
return 0;
2886
}
2887
2888
/**
2889
* cpufreq_register_driver - register a CPU Frequency driver
2890
* @driver_data: A struct cpufreq_driver containing the values
2891
* submitted by the CPU Frequency driver.
2892
*
2893
* Registers a CPU Frequency driver to this core code. This code
2894
* returns zero on success, -EEXIST when another driver got here first
2895
* (and isn't unregistered in the meantime).
2896
*
2897
*/
2898
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2899
{
2900
unsigned long flags;
2901
int ret;
2902
2903
if (cpufreq_disabled())
2904
return -ENODEV;
2905
2906
/*
2907
* The cpufreq core depends heavily on the availability of the CPU device
2908
* structures, so make sure they are available before proceeding further.
2909
*/
2910
if (!get_cpu_device(0))
2911
return -EPROBE_DEFER;
2912
2913
if (!driver_data || !driver_data->verify || !driver_data->init ||
2914
(driver_data->target_index && driver_data->target) ||
2915
(!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
2916
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2917
(!driver_data->online != !driver_data->offline) ||
2918
(driver_data->adjust_perf && !driver_data->fast_switch))
2919
return -EINVAL;
2920
2921
pr_debug("trying to register driver %s\n", driver_data->name);
2922
2923
/* Protect against concurrent CPU online/offline. */
2924
cpus_read_lock();
2925
2926
write_lock_irqsave(&cpufreq_driver_lock, flags);
2927
if (cpufreq_driver) {
2928
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2929
ret = -EEXIST;
2930
goto out;
2931
}
2932
cpufreq_driver = driver_data;
2933
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2934
2935
if (driver_data->setpolicy)
2936
driver_data->flags |= CPUFREQ_CONST_LOOPS;
2937
2938
if (cpufreq_boost_supported()) {
2939
ret = create_boost_sysfs_file();
2940
if (ret)
2941
goto err_null_driver;
2942
}
2943
2944
/*
2945
* Mark support for the scheduler's frequency invariance engine for
2946
* drivers that implement target(), target_index() or fast_switch().
2947
*/
2948
if (!cpufreq_driver->setpolicy) {
2949
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2950
pr_debug("cpufreq: supports frequency invariance\n");
2951
}
2952
2953
ret = subsys_interface_register(&cpufreq_interface);
2954
if (ret)
2955
goto err_boost_unreg;
2956
2957
if (unlikely(list_empty(&cpufreq_policy_list))) {
2958
/* if all ->init() calls failed, unregister */
2959
ret = -ENODEV;
2960
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2961
driver_data->name);
2962
goto err_if_unreg;
2963
}
2964
2965
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2966
"cpufreq:online",
2967
cpuhp_cpufreq_online,
2968
cpuhp_cpufreq_offline);
2969
if (ret < 0)
2970
goto err_if_unreg;
2971
hp_online = ret;
2972
ret = 0;
2973
2974
pr_debug("driver %s up and running\n", driver_data->name);
2975
goto out;
2976
2977
err_if_unreg:
2978
subsys_interface_unregister(&cpufreq_interface);
2979
err_boost_unreg:
2980
if (!cpufreq_driver->setpolicy)
2981
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2982
remove_boost_sysfs_file();
2983
err_null_driver:
2984
write_lock_irqsave(&cpufreq_driver_lock, flags);
2985
cpufreq_driver = NULL;
2986
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2987
out:
2988
cpus_read_unlock();
2989
return ret;
2990
}
2991
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
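/*
 * Illustrative sketch (not part of this file): the skeleton of a frequency
 * table based driver registration. All "example_*" callbacks are made up;
 * cpufreq_generic_frequency_table_verify() and cpufreq_generic_get() are
 * existing helpers commonly used by such drivers.
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name		= "example",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.get		= cpufreq_generic_get,
 *		.init		= example_cpu_init,
 *		.exit		= example_cpu_exit,
 *	};
 *
 *	static int __init example_cpufreq_init(void)
 *	{
 *		return cpufreq_register_driver(&example_driver);
 *	}
 */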
2992
2993
/*
2994
* cpufreq_unregister_driver - unregister the current CPUFreq driver
2995
*
2996
* Unregister the current CPUFreq driver. Only call this if you have
2997
* the right to do so, i.e. if you have succeeded in initialising it
2998
* before. If @driver does not match the currently registered driver,
2999
* a warning is emitted and nothing is done.
3000
*/
3001
void cpufreq_unregister_driver(struct cpufreq_driver *driver)
3002
{
3003
unsigned long flags;
3004
3005
if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
3006
return;
3007
3008
pr_debug("unregistering driver %s\n", driver->name);
3009
3010
/* Protect against concurrent cpu hotplug */
3011
cpus_read_lock();
3012
subsys_interface_unregister(&cpufreq_interface);
3013
remove_boost_sysfs_file();
3014
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
3015
cpuhp_remove_state_nocalls_cpuslocked(hp_online);
3016
3017
write_lock_irqsave(&cpufreq_driver_lock, flags);
3018
3019
cpufreq_driver = NULL;
3020
3021
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
3022
cpus_read_unlock();
3023
}
3024
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
3025
3026
static int __init cpufreq_core_init(void)
3027
{
3028
struct cpufreq_governor *gov = cpufreq_default_governor();
3029
struct device *dev_root;
3030
3031
if (cpufreq_disabled())
3032
return -ENODEV;
3033
3034
dev_root = bus_get_dev_root(&cpu_subsys);
3035
if (dev_root) {
3036
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
3037
put_device(dev_root);
3038
}
3039
BUG_ON(!cpufreq_global_kobject);
3040
3041
if (!strlen(default_governor))
3042
strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
3043
3044
return 0;
3045
}
3046
3047
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
3048
{
3049
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
3050
if (!policy) {
3051
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
3052
return false;
3053
}
3054
3055
return sugov_is_governor(policy);
3056
}
3057
3058
bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
3059
{
3060
unsigned int cpu;
3061
3062
/* Do not attempt EAS if schedutil is not being used. */
3063
for_each_cpu(cpu, cpu_mask) {
3064
if (!cpufreq_policy_is_good_for_eas(cpu)) {
3065
pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
3066
cpumask_pr_args(cpu_mask));
3067
return false;
3068
}
3069
}
3070
3071
return true;
3072
}
3073
3074
module_param(off, int, 0444);
3075
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
3076
core_initcall(cpufreq_core_init);
3077
3078