Path: blob/master/drivers/accel/habanalabs/common/device.c
29282 views
// SPDX-License-Identifier: GPL-2.012/*3* Copyright 2016-2022 HabanaLabs, Ltd.4* All Rights Reserved.5*/67#define pr_fmt(fmt) "habanalabs: " fmt89#include <uapi/drm/habanalabs_accel.h>10#include "habanalabs.h"1112#include <linux/pci.h>13#include <linux/hwmon.h>14#include <linux/vmalloc.h>1516#include <drm/drm_accel.h>17#include <drm/drm_drv.h>1819#include <trace/events/habanalabs.h>2021#define HL_RESET_DELAY_USEC 10000 /* 10ms */2223#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 302425enum dma_alloc_type {26DMA_ALLOC_COHERENT,27DMA_ALLOC_POOL,28};2930#define MEM_SCRUB_DEFAULT_VAL 0x11223344556677883132static void hl_device_heartbeat(struct work_struct *work);3334/*35* hl_set_dram_bar- sets the bar to allow later access to address36*37* @hdev: pointer to habanalabs device structure.38* @addr: the address the caller wants to access.39* @region: the PCI region.40* @new_bar_region_base: the new BAR region base address.41*42* @return: the old BAR base address on success, U64_MAX for failure.43* The caller should set it back to the old address after use.44*45* In case the bar space does not cover the whole address space,46* the bar base address should be set to allow access to a given address.47* This function can be called also if the bar doesn't need to be set,48* in that case it just won't change the base.49*/50static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,51u64 *new_bar_region_base)52{53struct asic_fixed_properties *prop = &hdev->asic_prop;54u64 bar_base_addr, old_base;5556if (is_power_of_2(prop->dram_pci_bar_size))57bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);58else59bar_base_addr = region->region_base +60div64_u64((addr - region->region_base), prop->dram_pci_bar_size) *61prop->dram_pci_bar_size;6263old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);6465/* in case of success we need to update the new BAR base */66if ((old_base != U64_MAX) && new_bar_region_base)67*new_bar_region_base = bar_base_addr;6869return old_base;70}7172int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,73enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)74{75struct pci_mem_region *region = &hdev->pci_mem_region[region_type];76u64 old_base = 0, rc, bar_region_base = region->region_base;77void __iomem *acc_addr;7879if (set_dram_bar) {80old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);81if (old_base == U64_MAX)82return -EIO;83}8485acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +86(addr - bar_region_base);8788switch (acc_type) {89case DEBUGFS_READ8:90*val = readb(acc_addr);91break;92case DEBUGFS_WRITE8:93writeb(*val, acc_addr);94break;95case DEBUGFS_READ32:96*val = readl(acc_addr);97break;98case DEBUGFS_WRITE32:99writel(*val, acc_addr);100break;101case DEBUGFS_READ64:102*val = readq(acc_addr);103break;104case DEBUGFS_WRITE64:105writeq(*val, acc_addr);106break;107}108109if (set_dram_bar) {110rc = hl_set_dram_bar(hdev, old_base, region, NULL);111if (rc == U64_MAX)112return -EIO;113}114115return 0;116}117118static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,119gfp_t flag, enum dma_alloc_type alloc_type,120const char *caller)121{122void *ptr = NULL;123124switch (alloc_type) {125case DMA_ALLOC_COHERENT:126ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);127break;128case DMA_ALLOC_POOL:129ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);130break;131}132133if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))134trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle,135size, caller);136137return ptr;138}139140static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,141dma_addr_t dma_handle, enum dma_alloc_type alloc_type,142const char *caller)143{144/* this is needed to avoid warning on using freed pointer */145u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;146147switch (alloc_type) {148case DMA_ALLOC_COHERENT:149hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);150break;151case DMA_ALLOC_POOL:152hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);153break;154}155156trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller);157}158159void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,160gfp_t flag, const char *caller)161{162return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);163}164165void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,166dma_addr_t dma_handle, const char *caller)167{168hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);169}170171void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,172dma_addr_t *dma_handle, const char *caller)173{174return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);175}176177void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,178const char *caller)179{180hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);181}182183void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)184{185return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);186}187188void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)189{190hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);191}192193int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,194enum dma_data_direction dir, const char *caller)195{196struct asic_fixed_properties *prop = &hdev->asic_prop;197struct scatterlist *sg;198int rc, i;199200rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);201if (rc)202return rc;203204if (!trace_habanalabs_dma_map_page_enabled())205return 0;206207for_each_sgtable_dma_sg(sgt, sg, i)208trace_habanalabs_dma_map_page(&(hdev)->pdev->dev,209page_to_phys(sg_page(sg)),210sg->dma_address - prop->device_dma_offset_for_host_access,211#ifdef CONFIG_NEED_SG_DMA_LENGTH212sg->dma_length,213#else214sg->length,215#endif216dir, caller);217218return 0;219}220221int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,222enum dma_data_direction dir)223{224struct asic_fixed_properties *prop = &hdev->asic_prop;225struct scatterlist *sg;226int rc, i;227228rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);229if (rc)230return rc;231232/* Shift to the device's base physical address of host memory if necessary */233if (prop->device_dma_offset_for_host_access)234for_each_sgtable_dma_sg(sgt, sg, i)235sg->dma_address += prop->device_dma_offset_for_host_access;236237return 0;238}239240void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,241enum dma_data_direction dir, const char *caller)242{243struct asic_fixed_properties *prop = &hdev->asic_prop;244struct scatterlist *sg;245int i;246247hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);248249if (trace_habanalabs_dma_unmap_page_enabled()) {250for_each_sgtable_dma_sg(sgt, sg, i)251trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev,252page_to_phys(sg_page(sg)),253sg->dma_address - prop->device_dma_offset_for_host_access,254#ifdef CONFIG_NEED_SG_DMA_LENGTH255sg->dma_length,256#else257sg->length,258#endif259dir, caller);260}261}262263void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,264enum dma_data_direction dir)265{266struct asic_fixed_properties *prop = &hdev->asic_prop;267struct scatterlist *sg;268int i;269270/* Cancel the device's base physical address of host memory if necessary */271if (prop->device_dma_offset_for_host_access)272for_each_sgtable_dma_sg(sgt, sg, i)273sg->dma_address -= prop->device_dma_offset_for_host_access;274275dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);276}277278/*279* hl_access_cfg_region - access the config region280*281* @hdev: pointer to habanalabs device structure282* @addr: the address to access283* @val: the value to write from or read to284* @acc_type: the type of access (read/write 64/32)285*/286int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,287enum debugfs_access_type acc_type)288{289struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];290u32 val_h, val_l;291292if (!IS_ALIGNED(addr, sizeof(u32))) {293dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));294return -EINVAL;295}296297switch (acc_type) {298case DEBUGFS_READ32:299*val = RREG32(addr - cfg_region->region_base);300break;301case DEBUGFS_WRITE32:302WREG32(addr - cfg_region->region_base, *val);303break;304case DEBUGFS_READ64:305val_l = RREG32(addr - cfg_region->region_base);306val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);307308*val = (((u64) val_h) << 32) | val_l;309break;310case DEBUGFS_WRITE64:311WREG32(addr - cfg_region->region_base, lower_32_bits(*val));312WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));313break;314default:315dev_err(hdev->dev, "access type %d is not supported\n", acc_type);316return -EOPNOTSUPP;317}318319return 0;320}321322/*323* hl_access_dev_mem - access device memory324*325* @hdev: pointer to habanalabs device structure326* @region_type: the type of the region the address belongs to327* @addr: the address to access328* @val: the value to write from or read to329* @acc_type: the type of access (r/w, 32/64)330*/331int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,332u64 addr, u64 *val, enum debugfs_access_type acc_type)333{334switch (region_type) {335case PCI_REGION_CFG:336return hl_access_cfg_region(hdev, addr, val, acc_type);337case PCI_REGION_SRAM:338case PCI_REGION_DRAM:339return hl_access_sram_dram_region(hdev, addr, val, acc_type,340region_type, (region_type == PCI_REGION_DRAM));341default:342return -EFAULT;343}344345return 0;346}347348void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)349{350va_list args;351int str_size;352353va_start(args, fmt);354/* Calculate formatted string length. Assuming each string is null terminated, hence355* increment result by 1356*/357str_size = vsnprintf(NULL, 0, fmt, args) + 1;358va_end(args);359360if ((e->actual_size + str_size) < e->allocated_buf_size) {361va_start(args, fmt);362vsnprintf(e->buf + e->actual_size, str_size, fmt, args);363va_end(args);364}365366/* Need to update the size even when not updating destination buffer to get the exact size367* of all input strings368*/369e->actual_size += str_size;370}371372enum hl_device_status hl_device_status(struct hl_device *hdev)373{374enum hl_device_status status;375376if (hdev->device_fini_pending) {377status = HL_DEVICE_STATUS_MALFUNCTION;378} else if (hdev->reset_info.in_reset) {379if (hdev->reset_info.in_compute_reset)380status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;381else382status = HL_DEVICE_STATUS_IN_RESET;383} else if (hdev->reset_info.needs_reset) {384status = HL_DEVICE_STATUS_NEEDS_RESET;385} else if (hdev->disabled) {386status = HL_DEVICE_STATUS_MALFUNCTION;387} else if (!hdev->init_done) {388status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;389} else {390status = HL_DEVICE_STATUS_OPERATIONAL;391}392393return status;394}395396bool hl_device_operational(struct hl_device *hdev,397enum hl_device_status *status)398{399enum hl_device_status current_status;400401current_status = hl_device_status(hdev);402if (status)403*status = current_status;404405switch (current_status) {406case HL_DEVICE_STATUS_MALFUNCTION:407case HL_DEVICE_STATUS_IN_RESET:408case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:409case HL_DEVICE_STATUS_NEEDS_RESET:410return false;411case HL_DEVICE_STATUS_OPERATIONAL:412case HL_DEVICE_STATUS_IN_DEVICE_CREATION:413default:414return true;415}416}417418bool hl_ctrl_device_operational(struct hl_device *hdev,419enum hl_device_status *status)420{421enum hl_device_status current_status;422423current_status = hl_device_status(hdev);424if (status)425*status = current_status;426427switch (current_status) {428case HL_DEVICE_STATUS_MALFUNCTION:429return false;430case HL_DEVICE_STATUS_IN_RESET:431case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:432case HL_DEVICE_STATUS_NEEDS_RESET:433case HL_DEVICE_STATUS_OPERATIONAL:434case HL_DEVICE_STATUS_IN_DEVICE_CREATION:435default:436return true;437}438}439440static void print_idle_status_mask(struct hl_device *hdev, const char *message,441u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])442{443if (idle_mask[3])444dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n",445dev_name(&hdev->pdev->dev), message,446idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);447else if (idle_mask[2])448dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n",449dev_name(&hdev->pdev->dev), message,450idle_mask[2], idle_mask[1], idle_mask[0]);451else if (idle_mask[1])452dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n",453dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]);454else455dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message,456idle_mask[0]);457}458459static void hpriv_release(struct kref *ref)460{461u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};462bool reset_device, device_is_idle = true;463struct hl_fpriv *hpriv;464struct hl_device *hdev;465466hpriv = container_of(ref, struct hl_fpriv, refcount);467468hdev = hpriv->hdev;469470hdev->asic_funcs->send_device_activity(hdev, false);471472hl_debugfs_remove_file(hpriv);473474mutex_destroy(&hpriv->ctx_lock);475mutex_destroy(&hpriv->restore_phase_mutex);476477/* There should be no memory buffers at this point and handles IDR can be destroyed */478hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);479480/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending481* reset that waits for device release.482*/483reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;484485/* Check the device idle status and reset if not idle.486* Skip it if already in reset, or if device is going to be reset in any case.487*/488if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)489device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,490HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);491if (!device_is_idle) {492print_idle_status_mask(hdev, "device is not idle after user context is closed",493idle_mask);494reset_device = true;495}496497/* We need to remove the user from the list to make sure the reset process won't498* try to kill the user process. Because, if we got here, it means there are no499* more driver/device resources that the user process is occupying so there is500* no need to kill it501*502* However, we can't set the compute_ctx to NULL at this stage. This is to prevent503* a race between the release and opening the device again. We don't want to let504* a user open the device while there a reset is about to happen.505*/506mutex_lock(&hdev->fpriv_list_lock);507list_del(&hpriv->dev_node);508mutex_unlock(&hdev->fpriv_list_lock);509510put_pid(hpriv->taskpid);511512if (reset_device) {513hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);514} else {515/* Scrubbing is handled within hl_device_reset(), so here need to do it directly */516int rc = hdev->asic_funcs->scrub_device_mem(hdev);517518if (rc) {519dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);520hl_device_reset(hdev, HL_DRV_RESET_HARD);521}522}523524/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different525* thread, we don't care because the in_reset is marked so if a user will try to open526* the device it will fail on that, even if compute_ctx is false.527*/528mutex_lock(&hdev->fpriv_list_lock);529hdev->is_compute_ctx_active = false;530mutex_unlock(&hdev->fpriv_list_lock);531532hdev->compute_ctx_in_release = 0;533534/* release the eventfd */535if (hpriv->notifier_event.eventfd)536eventfd_ctx_put(hpriv->notifier_event.eventfd);537538mutex_destroy(&hpriv->notifier_event.lock);539540kfree(hpriv);541}542543void hl_hpriv_get(struct hl_fpriv *hpriv)544{545kref_get(&hpriv->refcount);546}547548int hl_hpriv_put(struct hl_fpriv *hpriv)549{550return kref_put(&hpriv->refcount, hpriv_release);551}552553static void print_device_in_use_info(struct hl_device *hdev,554struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message)555{556u32 active_cs_num, dmabuf_export_cnt;557bool unknown_reason = true;558char buf[128];559size_t size;560int offset;561562size = sizeof(buf);563offset = 0;564565active_cs_num = hl_get_active_cs_num(hdev);566if (active_cs_num) {567unknown_reason = false;568offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);569}570571dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);572if (dmabuf_export_cnt) {573unknown_reason = false;574offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",575dmabuf_export_cnt);576}577578if (mm_fini_stats->n_busy_cb) {579unknown_reason = false;580offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]",581mm_fini_stats->n_busy_cb);582}583584if (unknown_reason)585scnprintf(buf + offset, size - offset, " [unknown reason]");586587dev_notice(hdev->dev, "%s%s\n", message, buf);588}589590/*591* hl_device_release() - release function for habanalabs device.592* @ddev: pointer to DRM device structure.593* @file: pointer to DRM file private data structure.594*595* Called when process closes an habanalabs device596*/597void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)598{599struct hl_fpriv *hpriv = file_priv->driver_priv;600struct hl_device *hdev = to_hl_device(ddev);601struct hl_mem_mgr_fini_stats mm_fini_stats;602603if (!hdev) {604pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");605put_pid(hpriv->taskpid);606}607608hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);609610/* Memory buffers might be still in use at this point and thus the handles IDR destruction611* is postponed to hpriv_release().612*/613hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats);614615hdev->compute_ctx_in_release = 1;616617if (!hl_hpriv_put(hpriv)) {618print_device_in_use_info(hdev, &mm_fini_stats,619"User process closed FD but device still in use");620hl_device_reset(hdev, HL_DRV_RESET_HARD);621}622623hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;624}625626static int hl_device_release_ctrl(struct inode *inode, struct file *filp)627{628struct hl_fpriv *hpriv = filp->private_data;629struct hl_device *hdev = hpriv->hdev;630631filp->private_data = NULL;632633if (!hdev) {634pr_err("Closing FD after device was removed\n");635goto out;636}637638mutex_lock(&hdev->fpriv_ctrl_list_lock);639list_del(&hpriv->dev_node);640mutex_unlock(&hdev->fpriv_ctrl_list_lock);641out:642put_pid(hpriv->taskpid);643644kfree(hpriv);645646return 0;647}648649static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)650{651struct hl_device *hdev = hpriv->hdev;652unsigned long vm_pgoff;653654if (!hdev) {655pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");656return -ENODEV;657}658659vm_pgoff = vma->vm_pgoff;660661switch (vm_pgoff & HL_MMAP_TYPE_MASK) {662case HL_MMAP_TYPE_BLOCK:663vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);664return hl_hw_block_mmap(hpriv, vma);665666case HL_MMAP_TYPE_CB:667case HL_MMAP_TYPE_TS_BUFF:668return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);669}670return -EINVAL;671}672673/*674* hl_mmap - mmap function for habanalabs device675*676* @*filp: pointer to file structure677* @*vma: pointer to vm_area_struct of the process678*679* Called when process does an mmap on habanalabs device. Call the relevant mmap680* function at the end of the common code.681*/682int hl_mmap(struct file *filp, struct vm_area_struct *vma)683{684struct drm_file *file_priv = filp->private_data;685struct hl_fpriv *hpriv = file_priv->driver_priv;686687return __hl_mmap(hpriv, vma);688}689690static const struct file_operations hl_ctrl_ops = {691.owner = THIS_MODULE,692.open = hl_device_open_ctrl,693.release = hl_device_release_ctrl,694.unlocked_ioctl = hl_ioctl_control,695.compat_ioctl = hl_ioctl_control696};697698static void device_release_func(struct device *dev)699{700kfree(dev);701}702703/*704* device_init_cdev - Initialize cdev and device for habanalabs device705*706* @hdev: pointer to habanalabs device structure707* @class: pointer to the class object of the device708* @minor: minor number of the specific device709* @fops: file operations to install for this device710* @name: name of the device as it will appear in the filesystem711* @cdev: pointer to the char device object that will be initialized712* @dev: pointer to the device object that will be initialized713*714* Initialize a cdev and a Linux device for habanalabs's device.715*/716static int device_init_cdev(struct hl_device *hdev, const struct class *class,717int minor, const struct file_operations *fops,718char *name, struct cdev *cdev,719struct device **dev)720{721cdev_init(cdev, fops);722cdev->owner = THIS_MODULE;723724*dev = kzalloc(sizeof(**dev), GFP_KERNEL);725if (!*dev)726return -ENOMEM;727728device_initialize(*dev);729(*dev)->devt = MKDEV(hdev->major, minor);730(*dev)->class = class;731(*dev)->release = device_release_func;732dev_set_drvdata(*dev, hdev);733dev_set_name(*dev, "%s", name);734735return 0;736}737738static int cdev_sysfs_debugfs_add(struct hl_device *hdev)739{740const struct class *accel_class = hdev->drm.accel->kdev->class;741char name[32];742int rc;743744hdev->cdev_idx = hdev->drm.accel->index;745746/* Initialize cdev and device structures for the control device */747snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);748rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,749&hdev->cdev_ctrl, &hdev->dev_ctrl);750if (rc)751return rc;752753rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);754if (rc) {755dev_err(hdev->dev_ctrl,756"failed to add an accel control char device to the system\n");757goto free_ctrl_device;758}759760rc = hl_sysfs_init(hdev);761if (rc) {762dev_err(hdev->dev, "failed to initialize sysfs\n");763goto delete_ctrl_cdev_device;764}765766hl_debugfs_add_device(hdev);767768hdev->cdev_sysfs_debugfs_created = true;769770return 0;771772delete_ctrl_cdev_device:773cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);774free_ctrl_device:775put_device(hdev->dev_ctrl);776return rc;777}778779static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)780{781if (!hdev->cdev_sysfs_debugfs_created)782return;783784hl_sysfs_fini(hdev);785786cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);787put_device(hdev->dev_ctrl);788}789790static void device_hard_reset_pending(struct work_struct *work)791{792struct hl_device_reset_work *device_reset_work =793container_of(work, struct hl_device_reset_work, reset_work.work);794struct hl_device *hdev = device_reset_work->hdev;795u32 flags;796int rc;797798flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;799800rc = hl_device_reset(hdev, flags);801802if ((rc == -EBUSY) && !hdev->device_fini_pending) {803struct hl_ctx *ctx = hl_get_compute_ctx(hdev);804805if (ctx) {806/* The read refcount value should subtracted by one, because the read is807* protected with hl_get_compute_ctx().808*/809dev_info(hdev->dev,810"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",811kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);812hl_ctx_put(ctx);813} else {814dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",815HL_PENDING_RESET_PER_SEC);816}817818queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,819secs_to_jiffies(HL_PENDING_RESET_PER_SEC));820}821}822823static void device_release_watchdog_func(struct work_struct *work)824{825struct hl_device_reset_work *watchdog_work =826container_of(work, struct hl_device_reset_work, reset_work.work);827struct hl_device *hdev = watchdog_work->hdev;828u32 flags;829830dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");831832flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;833834hl_device_reset(hdev, flags);835}836837/*838* device_early_init - do some early initialization for the habanalabs device839*840* @hdev: pointer to habanalabs device structure841*842* Install the relevant function pointers and call the early_init function,843* if such a function exists844*/845static int device_early_init(struct hl_device *hdev)846{847int i, rc;848char workq_name[32];849850switch (hdev->asic_type) {851case ASIC_GOYA:852goya_set_asic_funcs(hdev);853strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));854break;855case ASIC_GAUDI:856gaudi_set_asic_funcs(hdev);857strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));858break;859case ASIC_GAUDI_SEC:860gaudi_set_asic_funcs(hdev);861strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));862break;863case ASIC_GAUDI2:864gaudi2_set_asic_funcs(hdev);865strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));866break;867case ASIC_GAUDI2B:868gaudi2_set_asic_funcs(hdev);869strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));870break;871case ASIC_GAUDI2C:872gaudi2_set_asic_funcs(hdev);873strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));874break;875case ASIC_GAUDI2D:876gaudi2_set_asic_funcs(hdev);877strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name));878break;879default:880dev_err(hdev->dev, "Unrecognized ASIC type %d\n",881hdev->asic_type);882return -EINVAL;883}884885rc = hdev->asic_funcs->early_init(hdev);886if (rc)887return rc;888889rc = hl_asid_init(hdev);890if (rc)891goto early_fini;892893if (hdev->asic_prop.completion_queues_count) {894hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,895sizeof(struct workqueue_struct *),896GFP_KERNEL);897if (!hdev->cq_wq) {898rc = -ENOMEM;899goto asid_fini;900}901}902903for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {904snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);905hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);906if (hdev->cq_wq[i] == NULL) {907dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");908rc = -ENOMEM;909goto free_cq_wq;910}911}912913snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);914hdev->eq_wq = create_singlethread_workqueue(workq_name);915if (hdev->eq_wq == NULL) {916dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");917rc = -ENOMEM;918goto free_cq_wq;919}920921snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);922hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);923if (!hdev->cs_cmplt_wq) {924dev_err(hdev->dev,925"Failed to allocate CS completions workqueue\n");926rc = -ENOMEM;927goto free_eq_wq;928}929930snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);931hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);932if (!hdev->ts_free_obj_wq) {933dev_err(hdev->dev,934"Failed to allocate Timestamp registration free workqueue\n");935rc = -ENOMEM;936goto free_cs_cmplt_wq;937}938939snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);940hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);941if (!hdev->prefetch_wq) {942dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");943rc = -ENOMEM;944goto free_ts_free_wq;945}946947hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);948if (!hdev->hl_chip_info) {949rc = -ENOMEM;950goto free_prefetch_wq;951}952953rc = hl_mmu_if_set_funcs(hdev);954if (rc)955goto free_chip_info;956957hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);958959snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);960hdev->reset_wq = create_singlethread_workqueue(workq_name);961if (!hdev->reset_wq) {962rc = -ENOMEM;963dev_err(hdev->dev, "Failed to create device reset WQ\n");964goto free_cb_mgr;965}966967INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);968969INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);970hdev->device_reset_work.hdev = hdev;971hdev->device_fini_pending = 0;972973INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,974device_release_watchdog_func);975hdev->device_release_watchdog_work.hdev = hdev;976977mutex_init(&hdev->send_cpu_message_lock);978mutex_init(&hdev->debug_lock);979INIT_LIST_HEAD(&hdev->cs_mirror_list);980spin_lock_init(&hdev->cs_mirror_lock);981spin_lock_init(&hdev->reset_info.lock);982INIT_LIST_HEAD(&hdev->fpriv_list);983INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);984mutex_init(&hdev->fpriv_list_lock);985mutex_init(&hdev->fpriv_ctrl_list_lock);986mutex_init(&hdev->clk_throttling.lock);987988return 0;989990free_cb_mgr:991hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);992hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);993free_chip_info:994kfree(hdev->hl_chip_info);995free_prefetch_wq:996destroy_workqueue(hdev->prefetch_wq);997free_ts_free_wq:998destroy_workqueue(hdev->ts_free_obj_wq);999free_cs_cmplt_wq:1000destroy_workqueue(hdev->cs_cmplt_wq);1001free_eq_wq:1002destroy_workqueue(hdev->eq_wq);1003free_cq_wq:1004for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)1005if (hdev->cq_wq[i])1006destroy_workqueue(hdev->cq_wq[i]);1007kfree(hdev->cq_wq);1008asid_fini:1009hl_asid_fini(hdev);1010early_fini:1011if (hdev->asic_funcs->early_fini)1012hdev->asic_funcs->early_fini(hdev);10131014return rc;1015}10161017/*1018* device_early_fini - finalize all that was done in device_early_init1019*1020* @hdev: pointer to habanalabs device structure1021*1022*/1023static void device_early_fini(struct hl_device *hdev)1024{1025int i;10261027mutex_destroy(&hdev->debug_lock);1028mutex_destroy(&hdev->send_cpu_message_lock);10291030mutex_destroy(&hdev->fpriv_list_lock);1031mutex_destroy(&hdev->fpriv_ctrl_list_lock);10321033mutex_destroy(&hdev->clk_throttling.lock);10341035hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);1036hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);10371038kfree(hdev->hl_chip_info);10391040destroy_workqueue(hdev->prefetch_wq);1041destroy_workqueue(hdev->ts_free_obj_wq);1042destroy_workqueue(hdev->cs_cmplt_wq);1043destroy_workqueue(hdev->eq_wq);1044destroy_workqueue(hdev->reset_wq);10451046for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)1047destroy_workqueue(hdev->cq_wq[i]);1048kfree(hdev->cq_wq);10491050hl_asid_fini(hdev);10511052if (hdev->asic_funcs->early_fini)1053hdev->asic_funcs->early_fini(hdev);1054}10551056static bool is_pci_link_healthy(struct hl_device *hdev)1057{1058u16 device_id;10591060if (!hdev->pdev)1061return false;10621063pci_read_config_word(hdev->pdev, PCI_DEVICE_ID, &device_id);10641065return (device_id == hdev->pdev->device);1066}10671068static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)1069{1070struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;1071u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;1072struct asic_fixed_properties *prop = &hdev->asic_prop;10731074if (!prop->cpucp_info.eq_health_check_supported)1075return true;10761077if (!hdev->eq_heartbeat_received) {1078dev_err(hdev->dev, "EQ heartbeat event was not received!\n");10791080dev_err(hdev->dev,1081"EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",1082hdev->event_queue.ci,1083heartbeat_debug_info->heartbeat_event_counter,1084&hdev->heartbeat_debug_info.last_eq_heartbeat_ts,1085hdev->kernel_queues[cpu_q_id].pi,1086atomic_read(&hdev->kernel_queues[cpu_q_id].ci),1087atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,1088&hdev->heartbeat_debug_info.last_pq_heartbeat_ts);10891090hl_eq_dump(hdev, &hdev->event_queue);10911092return false;1093}10941095hdev->eq_heartbeat_received = false;10961097return true;1098}10991100static void hl_device_heartbeat(struct work_struct *work)1101{1102struct hl_device *hdev = container_of(work, struct hl_device,1103work_heartbeat.work);1104struct hl_info_fw_err_info info = {0};1105u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;11061107/* Start heartbeat checks only after driver has enabled events from FW */1108if (!hl_device_operational(hdev, NULL) || !hdev->init_done)1109goto reschedule;11101111/*1112* For EQ health check need to check if driver received the heartbeat eq event1113* in order to validate the eq is working.1114* Only if both the EQ is healthy and we managed to send the next heartbeat reschedule.1115*/1116if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev)))1117goto reschedule;11181119if (hl_device_operational(hdev, NULL))1120dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",1121is_pci_link_healthy(hdev) ? "healthy" : "broken");11221123info.err_type = HL_INFO_FW_HEARTBEAT_ERR;1124info.event_mask = &event_mask;1125hl_handle_fw_err(hdev, &info);1126hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);11271128return;11291130reschedule:1131/*1132* prev_reset_trigger tracks consecutive fatal h/w errors until first1133* heartbeat immediately post reset.1134* If control reached here, then at least one heartbeat work has been1135* scheduled since last reset/init cycle.1136* So if the device is not already in reset cycle, reset the flag1137* prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR1138* status for at least one heartbeat. From this point driver restarts1139* tracking future consecutive fatal errors.1140*/1141if (!hdev->reset_info.in_reset)1142hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;11431144schedule_delayed_work(&hdev->work_heartbeat,1145usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));1146}11471148/*1149* device_late_init - do late stuff initialization for the habanalabs device1150*1151* @hdev: pointer to habanalabs device structure1152*1153* Do stuff that either needs the device H/W queues to be active or needs1154* to happen after all the rest of the initialization is finished1155*/1156static int device_late_init(struct hl_device *hdev)1157{1158int rc;11591160if (hdev->asic_funcs->late_init) {1161rc = hdev->asic_funcs->late_init(hdev);1162if (rc) {1163dev_err(hdev->dev,1164"failed late initialization for the H/W\n");1165return rc;1166}1167}11681169hdev->high_pll = hdev->asic_prop.high_pll;1170hdev->late_init_done = true;11711172return 0;1173}11741175/*1176* device_late_fini - finalize all that was done in device_late_init1177*1178* @hdev: pointer to habanalabs device structure1179*1180*/1181static void device_late_fini(struct hl_device *hdev)1182{1183if (!hdev->late_init_done)1184return;11851186if (hdev->asic_funcs->late_fini)1187hdev->asic_funcs->late_fini(hdev);11881189hdev->late_init_done = false;1190}11911192int hl_device_utilization(struct hl_device *hdev, u32 *utilization)1193{1194u64 max_power, curr_power, dc_power, dividend, divisor;1195int rc;11961197max_power = hdev->max_power;1198dc_power = hdev->asic_prop.dc_power_default;1199divisor = max_power - dc_power;1200if (!divisor) {1201dev_warn(hdev->dev, "device utilization is not supported\n");1202return -EOPNOTSUPP;1203}1204rc = hl_fw_cpucp_power_get(hdev, &curr_power);12051206if (rc)1207return rc;12081209curr_power = clamp(curr_power, dc_power, max_power);12101211dividend = (curr_power - dc_power) * 100;1212*utilization = (u32) div_u64(dividend, divisor);12131214return 0;1215}12161217int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)1218{1219int rc = 0;12201221mutex_lock(&hdev->debug_lock);12221223if (!enable) {1224if (!hdev->in_debug) {1225dev_err(hdev->dev,1226"Failed to disable debug mode because device was not in debug mode\n");1227rc = -EFAULT;1228goto out;1229}12301231if (!hdev->reset_info.hard_reset_pending)1232hdev->asic_funcs->halt_coresight(hdev, ctx);12331234hdev->in_debug = 0;12351236goto out;1237}12381239if (hdev->in_debug) {1240dev_err(hdev->dev,1241"Failed to enable debug mode because device is already in debug mode\n");1242rc = -EFAULT;1243goto out;1244}12451246hdev->in_debug = 1;12471248out:1249mutex_unlock(&hdev->debug_lock);12501251return rc;1252}12531254static void take_release_locks(struct hl_device *hdev)1255{1256/* Flush anyone that is inside the critical section of enqueue1257* jobs to the H/W1258*/1259hdev->asic_funcs->hw_queues_lock(hdev);1260hdev->asic_funcs->hw_queues_unlock(hdev);12611262/* Flush processes that are sending message to CPU */1263mutex_lock(&hdev->send_cpu_message_lock);1264mutex_unlock(&hdev->send_cpu_message_lock);12651266/* Flush anyone that is inside device open */1267mutex_lock(&hdev->fpriv_list_lock);1268mutex_unlock(&hdev->fpriv_list_lock);1269mutex_lock(&hdev->fpriv_ctrl_list_lock);1270mutex_unlock(&hdev->fpriv_ctrl_list_lock);1271}12721273static void hl_abort_waiting_for_completions(struct hl_device *hdev)1274{1275hl_abort_waiting_for_cs_completions(hdev);12761277/* Release all pending user interrupts, each pending user interrupt1278* holds a reference to a user context.1279*/1280hl_release_pending_user_interrupts(hdev);1281}12821283static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,1284bool skip_wq_flush)1285{1286if (hard_reset) {1287if (hdev->heartbeat)1288cancel_delayed_work_sync(&hdev->work_heartbeat);12891290device_late_fini(hdev);1291}12921293/*1294* Halt the engines and disable interrupts so we won't get any more1295* completions from H/W and we won't have any accesses from the1296* H/W to the host machine1297*/1298hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);12991300/* Go over all the queues, release all CS and their jobs */1301hl_cs_rollback_all(hdev, skip_wq_flush);13021303/* flush the MMU prefetch workqueue */1304flush_workqueue(hdev->prefetch_wq);13051306hl_abort_waiting_for_completions(hdev);1307}13081309/*1310* hl_device_suspend - initiate device suspend1311*1312* @hdev: pointer to habanalabs device structure1313*1314* Puts the hw in the suspend state (all asics).1315* Returns 0 for success or an error on failure.1316* Called at driver suspend.1317*/1318int hl_device_suspend(struct hl_device *hdev)1319{1320int rc;13211322pci_save_state(hdev->pdev);13231324/* Block future CS/VM/JOB completion operations */1325spin_lock(&hdev->reset_info.lock);1326if (hdev->reset_info.in_reset) {1327spin_unlock(&hdev->reset_info.lock);1328dev_err(hdev->dev, "Can't suspend while in reset\n");1329return -EIO;1330}1331hdev->reset_info.in_reset = 1;1332spin_unlock(&hdev->reset_info.lock);13331334/* This blocks all other stuff that is not blocked by in_reset */1335hdev->disabled = true;13361337take_release_locks(hdev);13381339rc = hdev->asic_funcs->suspend(hdev);1340if (rc)1341dev_err(hdev->dev,1342"Failed to disable PCI access of device CPU\n");13431344/* Shut down the device */1345pci_disable_device(hdev->pdev);1346pci_set_power_state(hdev->pdev, PCI_D3hot);13471348return 0;1349}13501351/*1352* hl_device_resume - initiate device resume1353*1354* @hdev: pointer to habanalabs device structure1355*1356* Bring the hw back to operating state (all asics).1357* Returns 0 for success or an error on failure.1358* Called at driver resume.1359*/1360int hl_device_resume(struct hl_device *hdev)1361{1362int rc;13631364pci_set_power_state(hdev->pdev, PCI_D0);1365pci_restore_state(hdev->pdev);1366rc = pci_enable_device_mem(hdev->pdev);1367if (rc) {1368dev_err(hdev->dev,1369"Failed to enable PCI device in resume\n");1370return rc;1371}13721373pci_set_master(hdev->pdev);13741375rc = hdev->asic_funcs->resume(hdev);1376if (rc) {1377dev_err(hdev->dev, "Failed to resume device after suspend\n");1378goto disable_device;1379}138013811382/* 'in_reset' was set to true during suspend, now we must clear it in order1383* for hard reset to be performed1384*/1385spin_lock(&hdev->reset_info.lock);1386hdev->reset_info.in_reset = 0;1387spin_unlock(&hdev->reset_info.lock);13881389rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);1390if (rc) {1391dev_err(hdev->dev, "Failed to reset device during resume\n");1392goto disable_device;1393}13941395return 0;13961397disable_device:1398pci_disable_device(hdev->pdev);13991400return rc;1401}14021403static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)1404{1405struct task_struct *task = NULL;1406struct list_head *hpriv_list;1407struct hl_fpriv *hpriv;1408struct mutex *hpriv_lock;1409u32 pending_cnt;14101411hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;1412hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;14131414/* Giving time for user to close FD, and for processes that are inside1415* hl_device_open to finish1416*/1417if (!list_empty(hpriv_list))1418ssleep(1);14191420if (timeout) {1421pending_cnt = timeout;1422} else {1423if (hdev->process_kill_trial_cnt) {1424/* Processes have been already killed */1425pending_cnt = 1;1426goto wait_for_processes;1427} else {1428/* Wait a small period after process kill */1429pending_cnt = HL_PENDING_RESET_PER_SEC;1430}1431}14321433mutex_lock(hpriv_lock);14341435/* This section must be protected because we are dereferencing1436* pointers that are freed if the process exits1437*/1438list_for_each_entry(hpriv, hpriv_list, dev_node) {1439task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);1440if (task) {1441dev_info(hdev->dev, "Killing user process pid=%d\n",1442task_pid_nr(task));1443send_sig(SIGKILL, task, 1);1444usleep_range(1000, 10000);14451446put_task_struct(task);1447} else {1448dev_dbg(hdev->dev,1449"Can't get task struct for user process %d, process was killed from outside the driver\n",1450pid_nr(hpriv->taskpid));1451}1452}14531454mutex_unlock(hpriv_lock);14551456/*1457* We killed the open users, but that doesn't mean they are closed.1458* It could be that they are running a long cleanup phase in the driver1459* e.g. MMU unmappings, or running other long teardown flow even before1460* our cleanup.1461* Therefore we need to wait again to make sure they are closed before1462* continuing with the reset.1463*/14641465wait_for_processes:1466while ((!list_empty(hpriv_list)) && (pending_cnt)) {1467dev_dbg(hdev->dev,1468"Waiting for all unmap operations to finish before hard reset\n");14691470pending_cnt--;14711472ssleep(1);1473}14741475/* All processes exited successfully */1476if (list_empty(hpriv_list))1477return 0;14781479/* Give up waiting for processes to exit */1480if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)1481return -ETIME;14821483hdev->process_kill_trial_cnt++;14841485return -EBUSY;1486}14871488static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)1489{1490struct list_head *hpriv_list;1491struct hl_fpriv *hpriv;1492struct mutex *hpriv_lock;14931494hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;1495hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;14961497mutex_lock(hpriv_lock);1498list_for_each_entry(hpriv, hpriv_list, dev_node)1499hpriv->hdev = NULL;1500mutex_unlock(hpriv_lock);1501}15021503static void send_disable_pci_access(struct hl_device *hdev, u32 flags)1504{1505/* If reset is due to heartbeat, device CPU is no responsive in1506* which case no point sending PCI disable message to it.1507*/1508if ((flags & HL_DRV_RESET_HARD) &&1509!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {1510/* Disable PCI access from device F/W so he won't send1511* us additional interrupts. We disable MSI/MSI-X at1512* the halt_engines function and we can't have the F/W1513* sending us interrupts after that. We need to disable1514* the access here because if the device is marked1515* disable, the message won't be send. Also, in case1516* of heartbeat, the device CPU is marked as disable1517* so this message won't be sent1518*/1519if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))1520return;15211522/* disable_irq also generates sync irq, this verifies that last EQs are handled1523* before disabled is set. The IRQ will be enabled again in request_irq call.1524*/1525if (hdev->cpu_queues_enable)1526disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id));1527}1528}15291530static void handle_reset_trigger(struct hl_device *hdev, u32 flags)1531{1532u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;15331534/* No consecutive mechanism when user context exists */1535if (hdev->is_compute_ctx_active)1536return;15371538/*1539* 'reset cause' is being updated here, because getting here1540* means that it's the 1st time and the last time we're here1541* ('in_reset' makes sure of it). This makes sure that1542* 'reset_cause' will continue holding its 1st recorded reason!1543*/1544if (flags & HL_DRV_RESET_HEARTBEAT) {1545hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;1546cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;1547} else if (flags & HL_DRV_RESET_TDR) {1548hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;1549cur_reset_trigger = HL_DRV_RESET_TDR;1550} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {1551hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;1552cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;1553} else {1554hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;1555}15561557/*1558* If reset cause is same twice, then reset_trigger_repeated1559* is set and if this reset is due to a fatal FW error1560* device is set to an unstable state.1561*/1562if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {1563hdev->reset_info.prev_reset_trigger = cur_reset_trigger;1564hdev->reset_info.reset_trigger_repeated = 0;1565} else {1566hdev->reset_info.reset_trigger_repeated = 1;1567}1568}15691570static void reset_heartbeat_debug_info(struct hl_device *hdev)1571{1572hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0;1573hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0;1574hdev->heartbeat_debug_info.heartbeat_event_counter = 0;1575}15761577static inline void device_heartbeat_schedule(struct hl_device *hdev)1578{1579if (!hdev->heartbeat)1580return;15811582reset_heartbeat_debug_info(hdev);15831584/*1585* Before scheduling the heartbeat driver will check if eq event has received.1586* for the first schedule we need to set the indication as true then for the next1587* one this indication will be true only if eq event was sent by FW.1588*/1589hdev->eq_heartbeat_received = true;15901591schedule_delayed_work(&hdev->work_heartbeat,1592usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));1593}15941595/*1596* hl_device_reset - reset the device1597*1598* @hdev: pointer to habanalabs device structure1599* @flags: reset flags.1600*1601* Block future CS and wait for pending CS to be enqueued1602* Call ASIC H/W fini1603* Flush all completions1604* Re-initialize all internal data structures1605* Call ASIC H/W init, late_init1606* Test queues1607* Enable device1608*1609* Returns 0 for success or an error on failure.1610*/1611int hl_device_reset(struct hl_device *hdev, u32 flags)1612{1613bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,1614schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;1615u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};1616struct hl_ctx *ctx;1617int i, rc, hw_fini_rc;16181619if (!hdev->init_done) {1620dev_err(hdev->dev, "Can't reset before initialization is done\n");1621return 0;1622}16231624hard_reset = !!(flags & HL_DRV_RESET_HARD);1625from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);1626fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);1627from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);1628delay_reset = !!(flags & HL_DRV_RESET_DELAY);1629from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);1630reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;16311632if (hdev->cpld_shutdown) {1633dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! Device is NOT usable\n");1634return -EIO;1635}16361637if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {1638dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");1639return 0;1640}16411642if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {1643dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");1644hard_reset = true;1645}16461647if (reset_upon_device_release) {1648if (hard_reset) {1649dev_crit(hdev->dev,1650"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");1651return -EINVAL;1652}16531654goto do_reset;1655}16561657if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {1658dev_dbg(hdev->dev,1659"asic doesn't allow inference soft reset - do hard-reset instead\n");1660hard_reset = true;1661}16621663do_reset:1664/* Re-entry of reset thread */1665if (from_hard_reset_thread && hdev->process_kill_trial_cnt)1666goto kill_processes;16671668/*1669* Prevent concurrency in this function - only one reset should be1670* done at any given time. We need to perform this only if we didn't1671* get here from a dedicated hard reset thread.1672*/1673if (!from_hard_reset_thread) {1674/* Block future CS/VM/JOB completion operations */1675spin_lock(&hdev->reset_info.lock);1676if (hdev->reset_info.in_reset) {1677/* We allow scheduling of a hard reset only during a compute reset */1678if (hard_reset && hdev->reset_info.in_compute_reset)1679hdev->reset_info.hard_reset_schedule_flags = flags;1680spin_unlock(&hdev->reset_info.lock);1681return 0;1682}16831684/* This still allows the completion of some KDMA ops1685* Update this before in_reset because in_compute_reset implies we are in reset1686*/1687hdev->reset_info.in_compute_reset = !hard_reset;16881689hdev->reset_info.in_reset = 1;16901691spin_unlock(&hdev->reset_info.lock);16921693/* Cancel the device release watchdog work if required.1694* In case of reset-upon-device-release while the release watchdog work is1695* scheduled due to a hard-reset, do hard-reset instead of compute-reset.1696*/1697if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {1698struct hl_device_reset_work *watchdog_work =1699&hdev->device_release_watchdog_work;17001701hdev->reset_info.watchdog_active = 0;1702if (!from_watchdog_thread)1703cancel_delayed_work_sync(&watchdog_work->reset_work);17041705if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {1706hdev->reset_info.in_compute_reset = 0;1707flags |= HL_DRV_RESET_HARD;1708flags &= ~HL_DRV_RESET_DEV_RELEASE;1709hard_reset = true;1710}1711}17121713if (delay_reset)1714usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);17151716escalate_reset_flow:1717handle_reset_trigger(hdev, flags);1718send_disable_pci_access(hdev, flags);17191720/* This also blocks future CS/VM/JOB completion operations */1721hdev->disabled = true;17221723take_release_locks(hdev);17241725if (hard_reset)1726dev_info(hdev->dev, "Going to reset device\n");1727else if (reset_upon_device_release)1728dev_dbg(hdev->dev, "Going to reset device after release by user\n");1729else1730dev_dbg(hdev->dev, "Going to reset engines of inference device\n");1731}17321733if ((hard_reset) && (!from_hard_reset_thread)) {1734hdev->reset_info.hard_reset_pending = true;17351736hdev->process_kill_trial_cnt = 0;17371738hdev->device_reset_work.flags = flags;17391740/*1741* Because the reset function can't run from heartbeat work,1742* we need to call the reset function from a dedicated work.1743*/1744queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);17451746return 0;1747}17481749cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);17501751kill_processes:1752if (hard_reset) {1753/* Kill processes here after CS rollback. This is because the1754* process can't really exit until all its CSs are done, which1755* is what we do in cs rollback1756*/1757rc = device_kill_open_processes(hdev, 0, false);17581759if (rc == -EBUSY) {1760if (hdev->device_fini_pending) {1761dev_crit(hdev->dev,1762"%s Failed to kill all open processes, stopping hard reset\n",1763dev_name(&(hdev)->pdev->dev));1764goto out_err;1765}17661767/* signal reset thread to reschedule */1768return rc;1769}17701771if (rc) {1772dev_crit(hdev->dev,1773"%s Failed to kill all open processes, stopping hard reset\n",1774dev_name(&(hdev)->pdev->dev));1775goto out_err;1776}17771778/* Flush the Event queue workers to make sure no other thread is1779* reading or writing to registers during the reset1780*/1781flush_workqueue(hdev->eq_wq);1782}17831784/* Reset the H/W. It will be in idle state after this returns */1785hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);17861787if (hard_reset) {1788hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;17891790/* Release kernel context */1791if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)1792hdev->kernel_ctx = NULL;17931794hl_vm_fini(hdev);1795hl_mmu_fini(hdev);1796hl_eq_reset(hdev, &hdev->event_queue);1797}17981799/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */1800hl_hw_queue_reset(hdev, hard_reset);1801for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)1802hl_cq_reset(hdev, &hdev->completion_queue[i]);18031804/* Make sure the context switch phase will run again */1805ctx = hl_get_compute_ctx(hdev);1806if (ctx) {1807atomic_set(&ctx->thread_ctx_switch_token, 1);1808ctx->thread_ctx_switch_wait_token = 0;1809hl_ctx_put(ctx);1810}18111812if (hw_fini_rc) {1813rc = hw_fini_rc;1814goto out_err;1815}1816/* Finished tear-down, starting to re-initialize */18171818if (hard_reset) {1819hdev->device_cpu_disabled = false;1820hdev->reset_info.hard_reset_pending = false;18211822/*1823* Put the device in an unusable state if there are 2 back to back resets due to1824* fatal errors.1825*/1826if (hdev->reset_info.reset_trigger_repeated &&1827(hdev->reset_info.prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR ||1828hdev->reset_info.prev_reset_trigger ==1829HL_DRV_RESET_HEARTBEAT)) {1830dev_crit(hdev->dev,1831"%s Consecutive fatal errors, stopping hard reset\n",1832dev_name(&(hdev)->pdev->dev));1833rc = -EIO;1834goto out_err;1835}18361837if (hdev->kernel_ctx) {1838dev_crit(hdev->dev,1839"%s kernel ctx was alive during hard reset, something is terribly wrong\n",1840dev_name(&(hdev)->pdev->dev));1841rc = -EBUSY;1842goto out_err;1843}18441845rc = hl_mmu_init(hdev);1846if (rc) {1847dev_err(hdev->dev,1848"Failed to initialize MMU S/W after hard reset\n");1849goto out_err;1850}18511852/* Allocate the kernel context */1853hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),1854GFP_KERNEL);1855if (!hdev->kernel_ctx) {1856rc = -ENOMEM;1857hl_mmu_fini(hdev);1858goto out_err;1859}18601861hdev->is_compute_ctx_active = false;18621863rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);1864if (rc) {1865dev_err(hdev->dev,1866"failed to init kernel ctx in hard reset\n");1867kfree(hdev->kernel_ctx);1868hdev->kernel_ctx = NULL;1869hl_mmu_fini(hdev);1870goto out_err;1871}1872}18731874/* Device is now enabled as part of the initialization requires1875* communication with the device firmware to get information that1876* is required for the initialization itself1877*/1878hdev->disabled = false;18791880/* F/W security enabled indication might be updated after hard-reset */1881if (hard_reset) {1882rc = hl_fw_read_preboot_status(hdev);1883if (rc)1884goto out_err;1885}18861887rc = hdev->asic_funcs->hw_init(hdev);1888if (rc) {1889dev_err(hdev->dev, "failed to initialize the H/W after reset\n");1890goto out_err;1891}18921893/* If device is not idle fail the reset process */1894if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,1895HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {1896print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);1897rc = -EIO;1898goto out_err;1899}19001901/* Check that the communication with the device is working */1902rc = hdev->asic_funcs->test_queues(hdev);1903if (rc) {1904dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");1905goto out_err;1906}19071908if (hard_reset) {1909rc = device_late_init(hdev);1910if (rc) {1911dev_err(hdev->dev, "Failed late init after hard reset\n");1912goto out_err;1913}19141915rc = hl_vm_init(hdev);1916if (rc) {1917dev_err(hdev->dev, "Failed to init memory module after hard reset\n");1918goto out_err;1919}19201921if (!hdev->asic_prop.fw_security_enabled)1922hl_fw_set_max_power(hdev);1923} else {1924rc = hdev->asic_funcs->compute_reset_late_init(hdev);1925if (rc) {1926if (reset_upon_device_release)1927dev_err(hdev->dev,1928"Failed late init in reset after device release\n");1929else1930dev_err(hdev->dev, "Failed late init after compute reset\n");1931goto out_err;1932}1933}19341935rc = hdev->asic_funcs->scrub_device_mem(hdev);1936if (rc) {1937dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);1938goto out_err;1939}19401941spin_lock(&hdev->reset_info.lock);1942hdev->reset_info.in_compute_reset = 0;19431944/* Schedule hard reset only if requested and if not already in hard reset.1945* We keep 'in_reset' enabled, so no other reset can go in during the hard1946* reset schedule1947*/1948if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)1949schedule_hard_reset = true;1950else1951hdev->reset_info.in_reset = 0;19521953spin_unlock(&hdev->reset_info.lock);19541955hdev->reset_info.needs_reset = false;19561957if (hard_reset)1958dev_info(hdev->dev,1959"Successfully finished resetting the %s device\n",1960dev_name(&(hdev)->pdev->dev));1961else1962dev_dbg(hdev->dev,1963"Successfully finished resetting the %s device\n",1964dev_name(&(hdev)->pdev->dev));19651966if (hard_reset) {1967hdev->reset_info.hard_reset_cnt++;19681969device_heartbeat_schedule(hdev);19701971/* After reset is done, we are ready to receive events from1972* the F/W. We can't do it before because we will ignore events1973* and if those events are fatal, we won't know about it and1974* the device will be operational although it shouldn't be1975*/1976hdev->asic_funcs->enable_events_from_fw(hdev);1977} else {1978if (!reset_upon_device_release)1979hdev->reset_info.compute_reset_cnt++;19801981if (schedule_hard_reset) {1982dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");1983flags = hdev->reset_info.hard_reset_schedule_flags;1984hdev->reset_info.hard_reset_schedule_flags = 0;1985hard_reset = true;1986goto escalate_reset_flow;1987}1988}19891990return 0;19911992out_err:1993hdev->disabled = true;19941995spin_lock(&hdev->reset_info.lock);1996hdev->reset_info.in_compute_reset = 0;19971998if (hard_reset) {1999dev_err(hdev->dev,2000"%s Failed to reset! Device is NOT usable\n",2001dev_name(&(hdev)->pdev->dev));2002hdev->reset_info.hard_reset_cnt++;2003} else {2004if (reset_upon_device_release) {2005dev_err(hdev->dev, "Failed to reset device after user release\n");2006flags &= ~HL_DRV_RESET_DEV_RELEASE;2007} else {2008dev_err(hdev->dev, "Failed to do compute reset\n");2009hdev->reset_info.compute_reset_cnt++;2010}20112012spin_unlock(&hdev->reset_info.lock);2013flags |= HL_DRV_RESET_HARD;2014hard_reset = true;2015goto escalate_reset_flow;2016}20172018hdev->reset_info.in_reset = 0;20192020spin_unlock(&hdev->reset_info.lock);20212022return rc;2023}20242025/*2026* hl_device_cond_reset() - conditionally reset the device.2027* @hdev: pointer to habanalabs device structure.2028* @reset_flags: reset flags.2029* @event_mask: events to notify user about.2030*2031* Conditionally reset the device, or alternatively schedule a watchdog work to reset the device2032* unless another reset precedes it.2033*/2034int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)2035{2036struct hl_ctx *ctx = NULL;20372038/* F/W reset cannot be postponed */2039if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)2040goto device_reset;20412042/* Device release watchdog is relevant only if user exists and gets a reset notification */2043if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {2044dev_err(hdev->dev, "Resetting device without a reset indication to user\n");2045goto device_reset;2046}20472048ctx = hl_get_compute_ctx(hdev);2049if (!ctx)2050goto device_reset;20512052/*2053* There is no point in postponing the reset if user is not registered for events.2054* However if no eventfd_ctx exists but the device release watchdog is already scheduled, it2055* just implies that user has unregistered as part of handling a previous event. In this2056* case an immediate reset is not required.2057*/2058if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active)2059goto device_reset;20602061/* Schedule the device release watchdog work unless reset is already in progress or if the2062* work is already scheduled.2063*/2064spin_lock(&hdev->reset_info.lock);2065if (hdev->reset_info.in_reset) {2066spin_unlock(&hdev->reset_info.lock);2067goto device_reset;2068}20692070if (hdev->reset_info.watchdog_active) {2071hdev->device_release_watchdog_work.flags |= flags;2072goto out;2073}20742075hdev->device_release_watchdog_work.flags = flags;2076dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",2077hdev->device_release_watchdog_timeout_sec);2078schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,2079secs_to_jiffies(hdev->device_release_watchdog_timeout_sec));2080hdev->reset_info.watchdog_active = 1;2081out:2082spin_unlock(&hdev->reset_info.lock);20832084hl_notifier_event_send_all(hdev, event_mask);20852086hl_ctx_put(ctx);20872088hl_abort_waiting_for_completions(hdev);20892090return 0;20912092device_reset:2093if (event_mask)2094hl_notifier_event_send_all(hdev, event_mask);2095if (ctx)2096hl_ctx_put(ctx);20972098return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD);2099}21002101static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)2102{2103mutex_lock(¬ifier_event->lock);2104notifier_event->events_mask |= event_mask;21052106if (notifier_event->eventfd)2107eventfd_signal(notifier_event->eventfd);21082109mutex_unlock(¬ifier_event->lock);2110}21112112/*2113* hl_notifier_event_send_all - notify all user processes via eventfd2114*2115* @hdev: pointer to habanalabs device structure2116* @event_mask: the occurred event/s2117* Returns 0 for success or an error on failure.2118*/2119void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)2120{2121struct hl_fpriv *hpriv;21222123if (!event_mask) {2124dev_warn(hdev->dev, "Skip sending zero event");2125return;2126}21272128mutex_lock(&hdev->fpriv_list_lock);21292130list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)2131hl_notifier_event_send(&hpriv->notifier_event, event_mask);21322133mutex_unlock(&hdev->fpriv_list_lock);2134}21352136/*2137* hl_device_init - main initialization function for habanalabs device2138*2139* @hdev: pointer to habanalabs device structure2140*2141* Allocate an id for the device, do early initialization and then call the2142* ASIC specific initialization functions. Finally, create the cdev and the2143* Linux device to expose it to the user2144*/2145int hl_device_init(struct hl_device *hdev)2146{2147int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;2148struct hl_ts_free_jobs *free_jobs_data;2149bool expose_interfaces_on_err = false;2150void *p;21512152/* Initialize ASIC function pointers and perform early init */2153rc = device_early_init(hdev);2154if (rc)2155goto out_disabled;21562157user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +2158hdev->asic_prop.user_interrupt_count;21592160if (user_interrupt_cnt) {2161hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),2162GFP_KERNEL);2163if (!hdev->user_interrupt) {2164rc = -ENOMEM;2165goto early_fini;2166}21672168/* Timestamp records supported only if CQ supported in device */2169if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {2170for (i = 0 ; i < user_interrupt_cnt ; i++) {2171p = vzalloc(TIMESTAMP_FREE_NODES_NUM *2172sizeof(struct timestamp_reg_free_node));2173if (!p) {2174rc = -ENOMEM;2175goto free_usr_intr_mem;2176}2177free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data;2178free_jobs_data->free_nodes_pool = p;2179free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;2180free_jobs_data->next_avail_free_node_idx = 0;2181}2182}2183}21842185free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data;2186p = vzalloc(TIMESTAMP_FREE_NODES_NUM *2187sizeof(struct timestamp_reg_free_node));2188if (!p) {2189rc = -ENOMEM;2190goto free_usr_intr_mem;2191}21922193free_jobs_data->free_nodes_pool = p;2194free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;2195free_jobs_data->next_avail_free_node_idx = 0;21962197/*2198* Start calling ASIC initialization. First S/W then H/W and finally2199* late init2200*/2201rc = hdev->asic_funcs->sw_init(hdev);2202if (rc)2203goto free_common_usr_intr_mem;220422052206/* initialize completion structure for multi CS wait */2207hl_multi_cs_completion_init(hdev);22082209/*2210* Initialize the H/W queues. Must be done before hw_init, because2211* there the addresses of the kernel queue are being written to the2212* registers of the device2213*/2214rc = hl_hw_queues_create(hdev);2215if (rc) {2216dev_err(hdev->dev, "failed to initialize kernel queues\n");2217goto sw_fini;2218}22192220cq_cnt = hdev->asic_prop.completion_queues_count;22212222/*2223* Initialize the completion queues. Must be done before hw_init,2224* because there the addresses of the completion queues are being2225* passed as arguments to request_irq2226*/2227if (cq_cnt) {2228hdev->completion_queue = kcalloc(cq_cnt,2229sizeof(*hdev->completion_queue),2230GFP_KERNEL);22312232if (!hdev->completion_queue) {2233dev_err(hdev->dev,2234"failed to allocate completion queues\n");2235rc = -ENOMEM;2236goto hw_queues_destroy;2237}2238}22392240for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {2241rc = hl_cq_init(hdev, &hdev->completion_queue[i],2242hdev->asic_funcs->get_queue_id_for_cq(hdev, i));2243if (rc) {2244dev_err(hdev->dev,2245"failed to initialize completion queue\n");2246goto cq_fini;2247}2248hdev->completion_queue[i].cq_idx = i;2249}22502251hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,2252sizeof(struct hl_cs *), GFP_KERNEL);2253if (!hdev->shadow_cs_queue) {2254rc = -ENOMEM;2255goto cq_fini;2256}22572258/*2259* Initialize the event queue. Must be done before hw_init,2260* because there the address of the event queue is being2261* passed as argument to request_irq2262*/2263rc = hl_eq_init(hdev, &hdev->event_queue);2264if (rc) {2265dev_err(hdev->dev, "failed to initialize event queue\n");2266goto free_shadow_cs_queue;2267}22682269/* MMU S/W must be initialized before kernel context is created */2270rc = hl_mmu_init(hdev);2271if (rc) {2272dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");2273goto eq_fini;2274}22752276/* Allocate the kernel context */2277hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);2278if (!hdev->kernel_ctx) {2279rc = -ENOMEM;2280goto mmu_fini;2281}22822283hdev->is_compute_ctx_active = false;22842285hdev->asic_funcs->state_dump_init(hdev);22862287hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;22882289hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;22902291rc = hl_debugfs_device_init(hdev);2292if (rc) {2293dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");2294kfree(hdev->kernel_ctx);2295goto mmu_fini;2296}22972298/* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after2299* hl_debugfs_device_init().2300*/2301rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);2302if (rc) {2303dev_err(hdev->dev, "failed to initialize kernel context\n");2304kfree(hdev->kernel_ctx);2305goto debugfs_device_fini;2306}23072308rc = hl_cb_pool_init(hdev);2309if (rc) {2310dev_err(hdev->dev, "failed to initialize CB pool\n");2311goto release_ctx;2312}23132314rc = hl_dec_init(hdev);2315if (rc) {2316dev_err(hdev->dev, "Failed to initialize the decoder module\n");2317goto cb_pool_fini;2318}23192320/*2321* From this point, override rc (=0) in case of an error to allow debugging2322* (by adding char devices and creating sysfs/debugfs files as part of the error flow).2323*/2324expose_interfaces_on_err = true;23252326/* Device is now enabled as part of the initialization requires2327* communication with the device firmware to get information that2328* is required for the initialization itself2329*/2330hdev->disabled = false;23312332rc = hdev->asic_funcs->hw_init(hdev);2333if (rc) {2334dev_err(hdev->dev, "failed to initialize the H/W\n");2335rc = 0;2336goto out_disabled;2337}23382339/* Check that the communication with the device is working */2340rc = hdev->asic_funcs->test_queues(hdev);2341if (rc) {2342dev_err(hdev->dev, "Failed to detect if device is alive\n");2343rc = 0;2344goto out_disabled;2345}23462347rc = device_late_init(hdev);2348if (rc) {2349dev_err(hdev->dev, "Failed late initialization\n");2350rc = 0;2351goto out_disabled;2352}23532354dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",2355hdev->asic_name,2356hdev->asic_prop.dram_size / SZ_1G);23572358rc = hl_vm_init(hdev);2359if (rc) {2360dev_err(hdev->dev, "Failed to initialize memory module\n");2361rc = 0;2362goto out_disabled;2363}23642365/*2366* Expose devices and sysfs/debugfs files to user.2367* From here there is no need to expose them in case of an error.2368*/2369expose_interfaces_on_err = false;23702371rc = drm_dev_register(&hdev->drm, 0);2372if (rc) {2373dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc);2374rc = 0;2375goto out_disabled;2376}23772378rc = cdev_sysfs_debugfs_add(hdev);2379if (rc) {2380dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");2381rc = 0;2382goto out_disabled;2383}23842385/* Need to call this again because the max power might change,2386* depending on card type for certain ASICs2387*/2388if (hdev->asic_prop.set_max_power_on_device_init &&2389!hdev->asic_prop.fw_security_enabled)2390hl_fw_set_max_power(hdev);23912392/*2393* hl_hwmon_init() must be called after device_late_init(), because only2394* there we get the information from the device about which2395* hwmon-related sensors the device supports.2396* Furthermore, it must be done after adding the device to the system.2397*/2398rc = hl_hwmon_init(hdev);2399if (rc) {2400dev_err(hdev->dev, "Failed to initialize hwmon\n");2401rc = 0;2402goto out_disabled;2403}24042405/* Scheduling the EQ heartbeat thread must come after driver is done with all2406* initializations, as we want to make sure the FW gets enough time to be prepared2407* to respond to heartbeat packets.2408*/2409device_heartbeat_schedule(hdev);24102411dev_notice(hdev->dev,2412"Successfully added device %s to habanalabs driver\n",2413dev_name(&(hdev)->pdev->dev));24142415/* After initialization is done, we are ready to receive events from2416* the F/W. We can't do it before because we will ignore events and if2417* those events are fatal, we won't know about it and the device will2418* be operational although it shouldn't be2419*/2420hdev->asic_funcs->enable_events_from_fw(hdev);24212422hdev->init_done = true;24232424return 0;24252426cb_pool_fini:2427hl_cb_pool_fini(hdev);2428release_ctx:2429if (hl_ctx_put(hdev->kernel_ctx) != 1)2430dev_err(hdev->dev,2431"kernel ctx is still alive on initialization failure\n");2432debugfs_device_fini:2433hl_debugfs_device_fini(hdev);2434mmu_fini:2435hl_mmu_fini(hdev);2436eq_fini:2437hl_eq_fini(hdev, &hdev->event_queue);2438free_shadow_cs_queue:2439kfree(hdev->shadow_cs_queue);2440cq_fini:2441for (i = 0 ; i < cq_ready_cnt ; i++)2442hl_cq_fini(hdev, &hdev->completion_queue[i]);2443kfree(hdev->completion_queue);2444hw_queues_destroy:2445hl_hw_queues_destroy(hdev);2446sw_fini:2447hdev->asic_funcs->sw_fini(hdev);2448free_common_usr_intr_mem:2449vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);2450free_usr_intr_mem:2451if (user_interrupt_cnt) {2452for (i = 0 ; i < user_interrupt_cnt ; i++) {2453if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool)2454break;2455vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);2456}2457kfree(hdev->user_interrupt);2458}2459early_fini:2460device_early_fini(hdev);2461out_disabled:2462hdev->disabled = true;2463if (expose_interfaces_on_err) {2464drm_dev_register(&hdev->drm, 0);2465cdev_sysfs_debugfs_add(hdev);2466}24672468pr_err("Failed to initialize accel%d. Device %s is NOT usable!\n",2469hdev->cdev_idx, dev_name(&hdev->pdev->dev));24702471return rc;2472}24732474/*2475* hl_device_fini - main tear-down function for habanalabs device2476*2477* @hdev: pointer to habanalabs device structure2478*2479* Destroy the device, call ASIC fini functions and release the id2480*/2481void hl_device_fini(struct hl_device *hdev)2482{2483u32 user_interrupt_cnt;2484bool device_in_reset;2485ktime_t timeout;2486u64 reset_sec;2487int i, rc;24882489dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev));24902491hdev->device_fini_pending = 1;2492flush_delayed_work(&hdev->device_reset_work.reset_work);24932494if (hdev->pldm)2495reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;2496else2497reset_sec = HL_HARD_RESET_MAX_TIMEOUT;24982499/*2500* This function is competing with the reset function, so try to2501* take the reset atomic and if we are already in middle of reset,2502* wait until reset function is finished. Reset function is designed2503* to always finish. However, in Gaudi, because of all the network2504* ports, the hard reset could take between 10-30 seconds2505*/25062507timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);25082509spin_lock(&hdev->reset_info.lock);2510device_in_reset = !!hdev->reset_info.in_reset;2511if (!device_in_reset)2512hdev->reset_info.in_reset = 1;2513spin_unlock(&hdev->reset_info.lock);25142515while (device_in_reset) {2516usleep_range(50, 200);25172518spin_lock(&hdev->reset_info.lock);2519device_in_reset = !!hdev->reset_info.in_reset;2520if (!device_in_reset)2521hdev->reset_info.in_reset = 1;2522spin_unlock(&hdev->reset_info.lock);25232524if (ktime_compare(ktime_get(), timeout) > 0) {2525dev_crit(hdev->dev,2526"%s Failed to remove device because reset function did not finish\n",2527dev_name(&(hdev)->pdev->dev));2528return;2529}2530}25312532cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);25332534/* Disable PCI access from device F/W so it won't send us additional2535* interrupts. We disable MSI/MSI-X at the halt_engines function and we2536* can't have the F/W sending us interrupts after that. We need to2537* disable the access here because if the device is marked disable, the2538* message won't be send. Also, in case of heartbeat, the device CPU is2539* marked as disable so this message won't be sent2540*/2541hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);25422543/* Mark device as disabled */2544hdev->disabled = true;25452546take_release_locks(hdev);25472548hdev->reset_info.hard_reset_pending = true;25492550hl_hwmon_fini(hdev);25512552cleanup_resources(hdev, true, false, false);25532554/* Kill processes here after CS rollback. This is because the process2555* can't really exit until all its CSs are done, which is what we2556* do in cs rollback2557*/2558dev_info(hdev->dev,2559"Waiting for all processes to exit (timeout of %u seconds)",2560HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);25612562hdev->process_kill_trial_cnt = 0;2563rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);2564if (rc) {2565dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc);2566device_disable_open_processes(hdev, false);2567}25682569hdev->process_kill_trial_cnt = 0;2570rc = device_kill_open_processes(hdev, 0, true);2571if (rc) {2572dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc);2573device_disable_open_processes(hdev, true);2574}25752576hl_cb_pool_fini(hdev);25772578/* Reset the H/W. It will be in idle state after this returns */2579rc = hdev->asic_funcs->hw_fini(hdev, true, false);2580if (rc)2581dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);25822583/* Reset the H/W (if it accessible). It will be in idle state after this returns */2584if (!hdev->cpld_shutdown) {2585rc = hdev->asic_funcs->hw_fini(hdev, true, false);2586if (rc)2587dev_err(hdev->dev,2588"hw_fini failed in device fini while removing device %d\n", rc);2589}25902591hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;25922593/* Release kernel context */2594if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))2595dev_err(hdev->dev, "kernel ctx is still alive\n");25962597hl_dec_fini(hdev);25982599hl_vm_fini(hdev);26002601hl_mmu_fini(hdev);26022603vfree(hdev->captured_err_info.page_fault_info.user_mappings);26042605hl_eq_fini(hdev, &hdev->event_queue);26062607kfree(hdev->shadow_cs_queue);26082609for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)2610hl_cq_fini(hdev, &hdev->completion_queue[i]);2611kfree(hdev->completion_queue);26122613user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +2614hdev->asic_prop.user_interrupt_count;26152616if (user_interrupt_cnt) {2617if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {2618for (i = 0 ; i < user_interrupt_cnt ; i++)2619vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);2620}26212622kfree(hdev->user_interrupt);2623}26242625vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);26262627hl_hw_queues_destroy(hdev);26282629/* Call ASIC S/W finalize function */2630hdev->asic_funcs->sw_fini(hdev);26312632device_early_fini(hdev);26332634/* Hide devices and sysfs/debugfs files from user */2635cdev_sysfs_debugfs_remove(hdev);2636drm_dev_unregister(&hdev->drm);26372638hl_debugfs_device_fini(hdev);26392640pr_info("removed device successfully\n");2641}26422643/*2644* MMIO register access helper functions.2645*/26462647/*2648* hl_rreg - Read an MMIO register2649*2650* @hdev: pointer to habanalabs device structure2651* @reg: MMIO register offset (in bytes)2652*2653* Returns the value of the MMIO register we are asked to read2654*2655*/2656inline u32 hl_rreg(struct hl_device *hdev, u32 reg)2657{2658u32 val = readl(hdev->rmmio + reg);26592660if (unlikely(trace_habanalabs_rreg32_enabled()))2661trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val);26622663return val;2664}26652666/*2667* hl_wreg - Write to an MMIO register2668*2669* @hdev: pointer to habanalabs device structure2670* @reg: MMIO register offset (in bytes)2671* @val: 32-bit value2672*2673* Writes the 32-bit value into the MMIO register2674*2675*/2676inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)2677{2678if (unlikely(trace_habanalabs_wreg32_enabled()))2679trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val);26802681writel(val, hdev->rmmio + reg);2682}26832684void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,2685u8 flags)2686{2687struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;26882689if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {2690dev_err(hdev->dev,2691"Number of possible razwi initiators (%u) exceeded limit (%u)\n",2692num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);2693return;2694}26952696/* In case it's the first razwi since the device was opened, capture its parameters */2697if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))2698return;26992700razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());2701razwi_info->razwi.addr = addr;2702razwi_info->razwi.num_of_possible_engines = num_of_engines;2703memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],2704num_of_engines * sizeof(u16));2705razwi_info->razwi.flags = flags;27062707razwi_info->razwi_info_available = true;2708}27092710void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,2711u8 flags, u64 *event_mask)2712{2713hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);27142715if (event_mask)2716*event_mask |= HL_NOTIFIER_EVENT_RAZWI;2717}27182719static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)2720{2721struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;2722struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;2723struct hl_vm_hash_node *hnode;2724struct hl_userptr *userptr;2725enum vm_type *vm_type;2726struct hl_ctx *ctx;2727u32 map_idx = 0;2728int i;27292730/* Reset previous session count*/2731pgf_info->num_of_user_mappings = 0;27322733ctx = hl_get_compute_ctx(hdev);2734if (!ctx) {2735dev_err(hdev->dev, "Can't get user context for user mappings\n");2736return;2737}27382739mutex_lock(&ctx->mem_hash_lock);2740hash_for_each(ctx->mem_hash, i, hnode, node) {2741vm_type = hnode->ptr;2742if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||2743((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))2744pgf_info->num_of_user_mappings++;27452746}27472748if (!pgf_info->num_of_user_mappings)2749goto finish;27502751/* In case we already allocated in previous session, need to release it before2752* allocating new buffer.2753*/2754vfree(pgf_info->user_mappings);2755pgf_info->user_mappings =2756vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));2757if (!pgf_info->user_mappings) {2758pgf_info->num_of_user_mappings = 0;2759goto finish;2760}27612762hash_for_each(ctx->mem_hash, i, hnode, node) {2763vm_type = hnode->ptr;2764if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {2765userptr = hnode->ptr;2766pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;2767pgf_info->user_mappings[map_idx].size = userptr->size;2768map_idx++;2769} else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {2770phys_pg_pack = hnode->ptr;2771pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;2772pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;2773map_idx++;2774}2775}2776finish:2777mutex_unlock(&ctx->mem_hash_lock);2778hl_ctx_put(ctx);2779}27802781void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)2782{2783struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;27842785/* Capture only the first page fault */2786if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))2787return;27882789pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());2790pgf_info->page_fault.addr = addr;2791pgf_info->page_fault.engine_id = eng_id;2792hl_capture_user_mappings(hdev, is_pmmu);27932794pgf_info->page_fault_info_available = true;2795}27962797void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,2798u64 *event_mask)2799{2800hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);28012802if (event_mask)2803*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;2804}28052806static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)2807{2808struct hw_err_info *info = &hdev->captured_err_info.hw_err;28092810/* Capture only the first HW err */2811if (atomic_cmpxchg(&info->event_detected, 0, 1))2812return;28132814info->event.timestamp = ktime_to_ns(ktime_get());2815info->event.event_id = event_id;28162817info->event_info_available = true;2818}28192820void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)2821{2822hl_capture_hw_err(hdev, event_id);28232824if (event_mask)2825*event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;2826}28272828static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)2829{2830struct fw_err_info *info = &hdev->captured_err_info.fw_err;28312832/* Capture only the first FW error */2833if (atomic_cmpxchg(&info->event_detected, 0, 1))2834return;28352836info->event.timestamp = ktime_to_ns(ktime_get());2837info->event.err_type = fw_info->err_type;2838if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)2839info->event.event_id = fw_info->event_id;28402841info->event_info_available = true;2842}28432844void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)2845{2846hl_capture_fw_err(hdev, info);28472848if (info->event_mask)2849*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;2850}28512852void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count)2853{2854struct engine_err_info *info = &hdev->captured_err_info.engine_err;28552856/* Capture only the first engine error */2857if (atomic_cmpxchg(&info->event_detected, 0, 1))2858return;28592860info->event.timestamp = ktime_to_ns(ktime_get());2861info->event.engine_id = engine_id;2862info->event.error_count = error_count;2863info->event_info_available = true;2864}28652866void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)2867{2868vfree(captured_err_info->page_fault_info.user_mappings);2869memset(captured_err_info, 0, sizeof(struct hl_error_info));2870atomic_set(&captured_err_info->cs_timeout.write_enable, 1);2871captured_err_info->undef_opcode.write_enable = true;2872}28732874void hl_init_cpu_for_irq(struct hl_device *hdev)2875{2876#ifdef CONFIG_NUMA2877struct cpumask *available_mask = &hdev->irq_affinity_mask;2878int numa_node = hdev->pdev->dev.numa_node, i;2879static struct cpumask cpu_mask;28802881if (numa_node < 0)2882return;28832884if (!cpumask_and(&cpu_mask, cpumask_of_node(numa_node), cpu_online_mask)) {2885dev_err(hdev->dev, "No available affinities in current numa node\n");2886return;2887}28882889/* Remove HT siblings */2890for_each_cpu(i, &cpu_mask)2891cpumask_set_cpu(cpumask_first(topology_sibling_cpumask(i)), available_mask);2892#endif2893}28942895void hl_set_irq_affinity(struct hl_device *hdev, int irq)2896{2897if (cpumask_empty(&hdev->irq_affinity_mask)) {2898dev_dbg(hdev->dev, "affinity mask is empty\n");2899return;2900}29012902if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))2903dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);2904}29052906void hl_eq_heartbeat_event_handle(struct hl_device *hdev)2907{2908hdev->heartbeat_debug_info.heartbeat_event_counter++;2909hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds();2910hdev->eq_heartbeat_received = true;2911}29122913void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask)2914{2915struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling;2916ktime_t zero_time = ktime_set(0, 0);29172918mutex_lock(&clk_throttle->lock);29192920switch (event_type) {2921case EQ_EVENT_POWER_EVT_START:2922clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER;2923clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER;2924clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();2925clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;2926dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");2927break;29282929case EQ_EVENT_POWER_EVT_END:2930clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER;2931clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();2932dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");2933break;29342935case EQ_EVENT_THERMAL_EVT_START:2936clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL;2937clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL;2938clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();2939clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;2940*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;2941dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");2942break;29432944case EQ_EVENT_THERMAL_EVT_END:2945clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL;2946clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();2947*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;2948dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");2949break;29502951default:2952dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);2953break;2954}29552956mutex_unlock(&clk_throttle->lock);2957}29582959void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask)2960{2961hl_handle_critical_hw_err(hdev, event_id, event_mask);2962*event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;29632964/* Avoid any new accesses to the H/W */2965hdev->disabled = true;2966hdev->cpld_shutdown = true;2967}296829692970