GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type,
				      bool gate,
				      int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
	case AMD_IP_BLOCK_TYPE_ISP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, 0));
		break;
	case AMD_IP_BLOCK_TYPE_VCN:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, inst));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_link_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_link_reset = smu_link_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_link_reset;
}

int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_link_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
				   bool pause)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->pause_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->pause_power_profile(
			adev->powerplay.pp_handle, pause);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
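
/*
 * Illustrative sketch (editorial, not part of the upstream driver): how a
 * caller might use the sensor wrapper above. @data and @size are passed
 * through to the read_sensor hook, which fills the buffer and may update the
 * size; the sensor choice and variable names here are hypothetical.
 *
 *	uint32_t load, size = sizeof(load);
 *	int r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
 *				       &load, &size);
 *	if (!r)
 *		dev_info(adev->dev, "GPU load: %u%%\n", load);
 */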

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
 * @adev: amdgpu_device pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 * It returns false if the hardware does not support software SMU or
 * if the feature is not supported.
 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_vcn(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_vcn_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	guard(mutex)(&adev->pm.mutex);

	return smu_set_soft_freq_range(smu,
				       type,
				       min,
				       max);
}
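
/*
 * Note (editorial): unlike the wrappers above, amdgpu_dpm_set_soft_freq_range()
 * uses the scope-based guard(mutex)() helper from <linux/cleanup.h>, so
 * adev->pm.mutex is released automatically when the function returns and no
 * explicit mutex_unlock() is needed on the return path. A minimal sketch of
 * the same pattern, with a hypothetical lock name:
 *
 *	int example(struct amdgpu_device *adev)
 *	{
 *		guard(mutex)(&adev->pm.mutex);
 *		return do_work_under_lock(adev);	// hypothetical helper
 *	}
 */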

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
 *
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If new level failed, retain the umd state as before */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
							struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							   m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry the od_enabled member,
		 * as their pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return smu->od_enabled;
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry the od_enabled member,
		 * as their pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_get_temp_metrics - Retrieve metrics for a specific temperature type
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be fetched.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific temperature type. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
				    enum smu_temp_metric_type type, void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (!pp_funcs->get_temp_metrics ||
	    !amdgpu_dpm_is_temp_metrics_supported(adev, type))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
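
/*
 * Illustrative sketch (editorial, not part of the upstream driver): the
 * NULL-table convention described in the kernel-doc above lets a caller size
 * the buffer first and then fetch the data. Variable names are hypothetical.
 *
 *	ssize_t sz = amdgpu_dpm_get_temp_metrics(adev, type, NULL);
 *
 *	if (sz > 0) {
 *		void *buf = kzalloc(sz, GFP_KERNEL);
 *
 *		if (buf && amdgpu_dpm_get_temp_metrics(adev, type, buf) == sz)
 *			consume_metrics(buf);		// hypothetical consumer
 *		kfree(buf);
 *	}
 */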

/**
 * amdgpu_dpm_is_temp_metrics_supported - Check whether a temperature metrics
 * type is supported
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be checked.
 *
 * This function reports whether the specified temperature metrics type is
 * supported.
 *
 * Return: True if the metrics type is supported, false otherwise.
 */
bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
					  enum smu_temp_metric_type type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	bool support_temp_metrics = false;

	if (!pp_funcs->temp_metrics_is_supported)
		return support_temp_metrics;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_temp_metrics =
			pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_temp_metrics;
}

/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such as
 * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
				   void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_xcp_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}