Path: drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK	0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...)                    \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                 \
			     (smu)->ppt_funcs->intf(smu, ##args) :    \
			     -ENOTSUPP) :                             \
			    -EINVAL)

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32(smu->param_reg);
}
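/*
 * Illustrative sketch only, not part of the driver: the dispatch macro
 * above resolves an ASIC-specific ppt_funcs hook at the call site,
 * returning -EINVAL when no hook table is registered and -ENOTSUPP when
 * the table lacks the requested hook. A wrapper built on it would look
 * like the hypothetical helper below; __smu_get_enabled_features()
 * further down is the real instance of the same pattern.
 */
static inline int smu_cmn_example_enabled_mask(struct smu_context *smu,
					       uint64_t *mask)
{
	/* Dispatches to the ASIC's get_enabled_mask hook, if any. */
	return smu_cmn_call_asic_func(get_enabled_mask, smu, mask);
}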
/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * when the SMU has exported a unified header file containing these
 * macros, which header file we can just include and use the SMU's
 * macros. At the moment, these error codes are defined by the SMU
 * per ASIC, unfortunately, yet this is one driver for all ASICs.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

#define SMU_RESP_UNEXP (~0U)
/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The SMU_RESP_* macros above mirror these values. Ideally we would
 * include a single header file which defines them, maintained by the
 * SMU FW team, so that we're impervious to firmware changes. At the
 * moment those values are defined in various header files, one for
 * each ASIC, yet here we're a single ASIC-agnostic interface. Such a
 * change can be followed up by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32(smu->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}
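/*
 * A note on usage, drawn from the callers below: the raw register value
 * from __smu_cmn_poll_stat() is always paired with __smu_cmn_reg2errno()
 * to obtain a kernel -errno, i.e.
 *
 *	reg = __smu_cmn_poll_stat(smu);
 *	res = __smu_cmn_reg2errno(smu, reg);
 *
 * Keeping the raw value around as well lets callers distinguish
 * SMU_RESP_NONE (firmware still busy) from genuinely fatal responses.
 */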
static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);
	u32 msg_idx, prm;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		msg_idx = RREG32(smu->msg_reg);
		prm = RREG32(smu->param_reg);
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	case SMU_RESP_UNEXP:
		if (amdgpu_device_bus_status_check(smu->adev)) {
			/* print error immediately if device is off the bus */
			dev_err(adev->dev,
				"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				reg_c2pmsg_90, msg_index, param, message);
			break;
		}
		fallthrough;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* The command executed to completion (execution
		 * status: success), but its result status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}
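/*
 * Summary of the mapping above, for quick reference:
 *
 *	SMU_RESP_NONE           -> -ETIME
 *	SMU_RESP_OK             -> 0
 *	SMU_RESP_CMD_FAIL       -> -EIO
 *	SMU_RESP_CMD_UNKNOWN    -> -EOPNOTSUPP
 *	SMU_RESP_CMD_BAD_PREREQ -> -EINVAL
 *	SMU_RESP_BUSY_OTHER     -> -EBUSY
 *	anything else           -> -EREMOTEIO
 */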
static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->resp_reg, 0);
	WREG32(smu->param_reg, param);
	WREG32(smu->msg_reg, msg);
}
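/*
 * Commentary on the handshake above (an observation, not upstream text):
 * the response register is cleared first, so that __smu_cmn_poll_stat()
 * only sees a non-zero value once the firmware has posted a status, and
 * the parameter is written before the message index:
 *
 *	WREG32(smu->resp_reg, 0);	arm the status poll
 *	WREG32(smu->param_reg, param);	argument in place first
 *	WREG32(smu->msg_reg, msg);	message index written last
 *
 * Writing the message register last means the parameter is already
 * visible by the time the firmware picks up the command.
 */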
static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
					       enum smu_message_type msg)
{
	return smu->message_map[msg].flags;
}

static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
				    enum smu_message_type msg, bool *poll)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t flags, resp;
	bool fed_status, pri;

	flags = __smu_cmn_get_msg_flags(smu, msg);
	*poll = true;

	pri = !!(flags & SMU_MSG_NO_PRECHECK);
	/* When there is a RAS fatal error, FW won't process non-RAS-priority
	 * messages. Don't allow any messages other than RAS priority messages.
	 */
	fed_status = amdgpu_ras_get_fed_status(adev);
	if (fed_status) {
		if (!(flags & SMU_MSG_RAS_PRI)) {
			dev_dbg(adev->dev,
				"RAS error detected, skip sending %s",
				smu_get_message_name(smu, msg));
			return -EACCES;
		}
	}

	if (pri || fed_status) {
		/* FW will ignore non-priority messages when a RAS fatal error
		 * or reset condition is detected. Hence it is possible that a
		 * previous message didn't get a response. Allow priority
		 * messages to continue without polling for the response
		 * status of the previous message.
		 */
		resp = RREG32(smu->resp_reg);
		dev_dbg(adev->dev,
			"Sending priority message %s response status: %x",
			smu_get_message_name(smu, msg), resp);
		if (resp == 0)
			*poll = false;
	}

	return 0;
}

static int __smu_cmn_send_debug_msg(struct smu_context *smu,
				    u32 msg,
				    u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->debug_param_reg, param);
	WREG32(smu->debug_msg_reg, msg);
	WREG32(smu->debug_resp_reg, 0);

	return 0;
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
		res = -EREMOTEIO;
		goto Out;
	}

	if (smu->smc_fw_state == SMU_FW_INIT) {
		smu->smc_fw_state = SMU_FW_RUNTIME;
	} else {
		reg = __smu_cmn_poll_stat(smu);
		res = __smu_cmn_reg2errno(smu, reg);
		if (reg == SMU_RESP_NONE || res == -EREMOTEIO)
			goto Out;
	}

	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (res == -EREMOTEIO)
		smu->smc_fw_state = SMU_FW_HANG;

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered while
 * sending the message or receiving the reply. If there is a PCI bus
 * recovery or the destination is a virtual GPU which does not allow
 * this message type, the message is simply dropped and success is also
 * returned. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The value in @read_arg is read back regardless, to give back more
 * information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	bool poll = true;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);

	if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
		res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
		if (res)
			goto Out;
	}

	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
		res = -EREMOTEIO;
		goto Out;
	} else if (smu->smc_fw_state == SMU_FW_INIT) {
		/* Ignore initial smu response register value */
		poll = false;
		smu->smc_fw_state = SMU_FW_RUNTIME;
	}

	if (poll) {
		reg = __smu_cmn_poll_stat(smu);
		res = __smu_cmn_reg2errno(smu, reg);
		if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
			__smu_cmn_reg_print_error(smu, reg, index, param, msg);
			goto Out;
		}
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0) {
		if (res == -EREMOTEIO)
			smu->smc_fw_state = SMU_FW_HANG;
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	}
	if (read_arg) {
		smu_cmn_read_arg(smu, read_arg);
		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg);
	}
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}
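/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * file): a typical caller pattern for the function above -- send a
 * message with a parameter and read back the value the firmware
 * posted. SMU_MSG_GetDriverIfVersion is used the same way, via the
 * parameterless wrapper, by smu_cmn_get_smc_version() below.
 */
static int __maybe_unused
smu_cmn_example_if_version(struct smu_context *smu, uint32_t *if_version)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDriverIfVersion,
					      0, if_version);
	/* -EOPNOTSUPP here means the firmware ignored an unknown command. */
	if (ret == -EOPNOTSUPP)
		dev_dbg(smu->adev->dev,
			"GetDriverIfVersion not supported by this firmware\n");

	return ret;
}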
int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(smu, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(smu, msg, param);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -ENOTSUPP;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
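/*
 * Usage note, drawn from callers in this file: a return value >= 0 is
 * the ASIC-specific index, while -EINVAL/-EACCES/-ENOTSUPP must be
 * handled by the caller. smu_cmn_get_enabled_mask() below, for example,
 * probes whether the running ASIC implements a message at all:
 *
 *	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 *					       SMU_MSG_GetEnabledSmuFeatures);
 *	if (index > 0)
 *		... use the single-message flavour ...
 */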
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. Since they also have no feature_map available, this
	 * check avoids the otherwise pointless feature_map lookup below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
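/*
 * Worked example for the translation above, with a hypothetical map:
 * if throttler_map[3] == 42, then a set dependent bit 3 becomes
 * independent bit 42, i.e.
 *
 *	dep_status = BIT(3)   ->   indep_status = 1ULL << 42
 *
 * Each ASIC supplies its own throttler_map; unset dependent bits
 * contribute nothing.
 */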
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	uint64_t feature_mask;
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[feature_index]),
				      feature_index,
				      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}
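/*
 * Worked example for the mask diff above: with feature_mask = 0b1010
 * currently enabled and new_mask = 0b0110 requested,
 *
 *	feature_2_enabled  = ~0b1010 &  0b0110 = 0b0100  (bit 2 to enable)
 *	feature_2_disabled =  0b1010 & ~0b0110 = 0b1000  (bit 3 to disable)
 *
 * so only the bits that actually change state are sent to the SMU.
 */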
/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:  smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, all dpm features
 *        to disable
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache to guarantee that the content the
		 * GPU sees is consistent with the CPU's.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}
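/*
 * Worked example for the message parameter packed above: the low 16
 * bits carry the ASIC-specific table id and the high 16 bits the
 * caller's @argument, so table_id 2 with argument 1 becomes
 *
 *	2 | ((1 & 0xFFFF) << 16) == 0x00010002
 */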
int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}

static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case SOC_PSTATE_DEFAULT:
		return "soc_pstate_default";
	case SOC_PSTATE_0:
		return "soc_pstate_0";
	case SOC_PSTATE_1:
		return "soc_pstate_1";
	case SOC_PSTATE_2:
		return "soc_pstate_2";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc pstate_policy_desc = {
	.name = STR_SOC_PSTATE_POLICY,
	.get_desc = smu_soc_policy_get_desc,
};

void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &pstate_policy_desc;
}

static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
					   int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case XGMI_PLPD_DISALLOW:
		return "plpd_disallow";
	case XGMI_PLPD_DEFAULT:
		return "plpd_default";
	case XGMI_PLPD_OPTIMIZED:
		return "plpd_optimized";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
	.name = STR_XGMI_PLPD_POLICY,
	.get_desc = smu_xgmi_plpd_policy_get_desc,
};

void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &xgmi_plpd_policy_desc;
}

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask)
{
	int workload_type;
	u32 profile_mode;

	*backend_workload_mask = 0;

	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
		if (!(workload_mask & (1 << profile_mode)))
			continue;

		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       profile_mode);

		if (workload_type < 0)
			continue;

		*backend_workload_mask |= 1 << workload_type;
	}
}
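/*
 * Worked example for the translation above, with a hypothetical
 * mapping: if the ASIC maps a frontend profile at bit 5 to backend
 * WORKLOAD_PPLIB bit 3, then workload_mask = BIT(5) yields
 * *backend_workload_mask = BIT(3); frontend bits with no valid mapping
 * are silently dropped.
 */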