/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/**
 * DOC: Introduction
 *
 * This documentation describes the Panthor IOCTLs.
 *
 * Just a few generic rules about the data passed to the Panthor IOCTLs:
 *
 * - Structures must be aligned on 64-bit/8-byte. If the object is not
 *   naturally aligned, a padding field must be added.
 * - Fields must be explicitly aligned to their natural type alignment with
 *   pad[0..N] fields.
 * - All padding fields will be checked by the driver to make sure they are
 *   zeroed.
 * - Flags can be added, but not removed/replaced.
 * - New fields can be added to the main structures (the structures
 *   directly passed to the ioctl). Those fields can be added at the end of
 *   the structure, or replace existing padding fields. Any new field being
 *   added must preserve the behavior that existed before those fields were
 *   added when a value of zero is passed.
 * - New fields can be added to indirect objects (objects pointed by the
 *   main structure), iff those objects are passed a size to reflect the
 *   size known by the userspace driver (see drm_panthor_obj_array::stride
 *   or drm_panthor_dev_query::size).
 * - If the kernel driver is too old to know some fields, those will be
 *   ignored if zero, and otherwise rejected (and so will be zero on output).
 * - If userspace is too old to know some fields, those will be zeroed
 *   (input) before the structure is parsed by the kernel driver.
 * - Each new flag/field addition must come with a driver version update so
 *   the userspace driver doesn't have to trial and error to know which
 *   flags are supported.
 * - Structures should not contain unions, as this would defeat the
 *   extensibility of such structures.
 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
 *   at the end of the drm_panthor_ioctl_id enum.
 */

/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * The pgoffset passed to mmap2() is an unsigned long, which forces us to
 * use a different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)
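/*
 * Example (illustrative, not part of the UAPI): the LATEST_FLUSH_ID page can
 * be mapped read-only with a plain mmap() on the DRM FD. `fd` is assumed to
 * be an open panthor render node, and the program is assumed to be built
 * with a 64-bit off_t (e.g. -D_FILE_OFFSET_BITS=64) so the offset fits on
 * 32-bit systems.
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *
 *	static const uint32_t *map_flush_id_page(int fd, long page_size)
 *	{
 *		void *p = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *			       DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *
 *		return p == MAP_FAILED ? NULL : (const uint32_t *)p;
 *	}
 */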
/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,

	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,

	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,

	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,

	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,

	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,

	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,

	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,

	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,

	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,

	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,

	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,

	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,

	/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
	DRM_PANTHOR_BO_SET_LABEL,

	/**
	 * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
	 *
	 * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
	 * type seen by the process that manipulates the FD, such that a 32-bit process can
	 * always map the user MMIO ranges. But this approach doesn't work well for emulators
	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
	 * code. In that case, the kernel thinks it's dealing with a 64-bit process and
	 * assumes DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
	 * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
	 * pgoff_t size.
	 */
	DRM_PANTHOR_SET_USER_MMIO_OFFSET,
};

/**
 * DOC: IOCTL arguments
 */

/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to
 * changes in future versions of the driver. In order to support this
 * mutability, we pass a stride describing the size of the object as known
 * by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should
 * instead use the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of
 * initializing the stride to the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/** @array: User pointer to an array of objects. */
	__u64 array;
};

/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
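/*
 * Example (illustrative): DRM_PANTHOR_OBJ_ARRAY() derives the stride from the
 * pointed-to type, so the kernel knows which struct version userspace was
 * built against. `syncs` stands for an array filled by the caller.
 *
 *	struct drm_panthor_sync_op syncs[2];
 *	struct drm_panthor_obj_array arr = DRM_PANTHOR_OBJ_ARRAY(2, syncs);
 */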
/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,

	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,

	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,

	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,

	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};

/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;

	/** @handle: Sync handle. */
	__u32 handle;

	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};
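/*
 * Example (illustrative): signaling point 42 of a timeline syncobj when the
 * operation completes. `syncobj_handle` is an assumed, previously created
 * DRM syncobj handle.
 *
 *	struct drm_panthor_sync_op sync = {
 *		.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ |
 *			 DRM_PANTHOR_SYNC_OP_SIGNAL,
 *		.handle = syncobj_handle,
 *		.timeline_value = 42,
 *	};
 */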
/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,

	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,

	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,

	/**
	 * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information.
	 */
	DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
};

/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id: GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;

	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;

	/** @tiler_features: Tiler features. */
	__u32 tiler_features;

	/** @mem_features: Memory features. */
	__u32 mem_features;

	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;

	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;

	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;

	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;

	/** @coherency_features: Coherency features. */
	__u32 coherency_features;

	/** @texture_features: Texture features. */
	__u32 texture_features[4];

	/** @as_present: Bitmask encoding the address spaces exposed by the MMU. */
	__u32 as_present;

	/** @pad0: MBZ. */
	__u32 pad0;

	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;

	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;

	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;

	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;

	/** @pad: MBZ. */
	__u32 pad;

	/** @gpu_features: Bitmask describing supported GPU-wide features. */
	__u64 gpu_features;
};

/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;

	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;

	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;

	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_PANTHOR_IOCTL_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;

	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};

/**
 * struct drm_panthor_timestamp_info - Timestamp information
 *
 * Structure grouping all queryable information relating to the GPU timestamp.
 */
struct drm_panthor_timestamp_info {
	/**
	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
	 * unknown.
	 */
	__u64 timestamp_frequency;

	/** @current_timestamp: The current timestamp. */
	__u64 current_timestamp;

	/** @timestamp_offset: The offset of the timestamp timer. */
	__u64 timestamp_offset;
};

/**
 * struct drm_panthor_group_priorities_info - Group priorities information
 *
 * Structure grouping all queryable information relating to the allowed group priorities.
 */
struct drm_panthor_group_priorities_info {
	/**
	 * @allowed_mask: Bitmask of the allowed group priorities.
	 *
	 * Each bit represents a variant of the enum drm_panthor_group_priority.
	 */
	__u8 allowed_mask;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];
};
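/*
 * Example (illustrative): querying the allowed group priorities and testing
 * whether high-priority groups may be created. `fd` is an assumed open
 * panthor render node, `can_use_high_prio` an assumed caller variable.
 *
 *	struct drm_panthor_group_priorities_info prio_info = {};
 *	struct drm_panthor_dev_query query = {
 *		.type = DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
 *		.size = sizeof(prio_info),
 *		.pointer = (__u64)(uintptr_t)&prio_info,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query) == 0)
 *		can_use_high_prio =
 *			!!(prio_info.allowed_mask & (1 << PANTHOR_GROUP_PRIORITY_HIGH));
 */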
/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;

	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;

	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};

/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_PANTHOR_IOCTL_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;

	/** @id: Returned VM ID. */
	__u32 id;

	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space for the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};

/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_PANTHOR_IOCTL_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;

	/** @pad: MBZ. */
	__u32 pad;
};
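/*
 * Example (illustrative): creating a VM and letting the kernel pick the
 * user/kernel VA split by passing a zero user_va_range. `fd` and `vm_id`
 * are assumed caller variables.
 *
 *	struct drm_panthor_vm_create vm_args = {};
 *
 *	if (ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_args) == 0)
 *		vm_id = vm_args.id;
 */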
/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};

/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;

	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;

	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;

	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;

	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;

	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};

/**
 * enum drm_panthor_vm_bind_flags - VM bind flags
 */
enum drm_panthor_vm_bind_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
	 * queue instead of being executed synchronously.
	 */
	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};

/**
 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
 */
struct drm_panthor_vm_bind {
	/** @vm_id: VM targeted by the bind request. */
	__u32 vm_id;

	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
	__u32 flags;

	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
	struct drm_panthor_obj_array ops;
};
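/*
 * Example (illustrative): synchronously mapping a BO at a fixed GPU VA.
 * `fd`, `vm_id`, `bo_handle`, `va` and `size` are assumed values obtained
 * from earlier VM/BO creation calls.
 *
 *	struct drm_panthor_vm_bind_op bind_op = {
 *		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
 *		.bo_handle = bo_handle,
 *		.bo_offset = 0,
 *		.va = va,
 *		.size = size,
 *	};
 *	struct drm_panthor_vm_bind bind_args = {
 *		.vm_id = vm_id,
 *		.ops = DRM_PANTHOR_OBJ_ARRAY(1, &bind_op),
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind_args);
 */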
/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,

	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};

/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;

	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};

/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};

/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;

	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - it cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};

/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];

	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};

/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,

	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,

	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,

	/**
	 * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_REALTIME,
};

/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;

	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;

	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;

	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;

	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;

	/** @pad: Padding field, MBZ. */
	__u32 pad;

	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;

	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;

	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;

	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submission to queues bound to this group will use this VM.
	 */
	__u32 vm_id;

	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};
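/*
 * Example (illustrative): creating a group with a single queue, allowing all
 * cores reported by the GPU. `fd`, `gpu_info` and `vm_id` are assumed to
 * come from earlier DEV_QUERY/VM_CREATE calls; the ring-buffer size and
 * per-job core limits are made-up values.
 *
 *	struct drm_panthor_queue_create queue = {
 *		.priority = 0,
 *		.ringbuf_size = 64 * 1024,
 *	};
 *	struct drm_panthor_group_create group_args = {
 *		.queues = DRM_PANTHOR_OBJ_ARRAY(1, &queue),
 *		.max_compute_cores = 1,
 *		.max_fragment_cores = 1,
 *		.max_tiler_cores = 1,
 *		.priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *		.compute_core_mask = gpu_info.shader_present,
 *		.fragment_core_mask = gpu_info.shader_present,
 *		.tiler_core_mask = gpu_info.tiler_present,
 *		.vm_id = vm_id,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &group_args);
 */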
/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy. */
	__u32 group_handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This describes the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;

	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction).
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;

	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be 64-byte aligned.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;

	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;

	/** @pad: MBZ. */
	__u32 pad;

	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};

/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};
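/*
 * Example (illustrative): submitting a single command stream to queue 0 of a
 * group. `fd`, `group_handle`, `cs_gpu_va`, `cs_size` and `flush_id` are
 * assumed values produced by the userspace driver (see the LATEST_FLUSH_ID
 * mapping example above for one way to obtain `flush_id`).
 *
 *	struct drm_panthor_queue_submit qsubmit = {
 *		.queue_index = 0,
 *		.stream_size = cs_size,
 *		.stream_addr = cs_gpu_va,
 *		.latest_flush = flush_id,
 *	};
 *	struct drm_panthor_group_submit submit_args = {
 *		.group_handle = group_handle,
 *		.queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &submit_args);
 */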
/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other
	 * groups.
	 *
	 * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and
	 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not.
	 */
	DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2,
};

/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on. */
	__u32 group_handle;

	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;

	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to. */
	__u32 vm_id;

	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;

	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;

	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;

	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight render passes in flight,
	 * the FW will wait for render passes to finish before queuing new
	 * tiler jobs.
	 */
	__u32 target_in_flight;

	/** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */
	__u32 handle;

	/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the tiler heap context. */
	__u64 tiler_heap_ctx_gpu_va;

	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is formed of heap chunks forming a singly-linked list. This
	 * is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};

/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
 */
struct drm_panthor_bo_set_label {
	/** @handle: Handle of the buffer object to label. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/**
	 * @label: User pointer to a NUL-terminated string.
	 *
	 * Length cannot be greater than 4096.
	 */
	__u64 label;
};
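/*
 * Example (illustrative): labeling a BO to ease debugging. `fd` and
 * `bo_handle` are assumed values; the handle would come from
 * DRM_IOCTL_PANTHOR_BO_CREATE.
 *
 *	static const char label[] = "vertex-buffer";
 *	struct drm_panthor_bo_set_label label_args = {
 *		.handle = bo_handle,
 *		.label = (__u64)(uintptr_t)label,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &label_args);
 */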
/**
 * struct drm_panthor_set_user_mmio_offset - Arguments passed to
 * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
 *
 * This ioctl is only really useful if you want to support userspace
 * CPU emulation environments where the size of an unsigned long differs
 * between the host and the guest architectures.
 */
struct drm_panthor_set_user_mmio_offset {
	/**
	 * @offset: User MMIO offset to use.
	 *
	 * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
	 *
	 * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
	 * OFFSET_64BIT based on the size of an unsigned long) unless you
	 * have a very good reason to overrule this decision.
	 */
	__u64 offset;
};

/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx id.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)

enum {
	DRM_IOCTL_PANTHOR_DEV_QUERY =
		DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
	DRM_IOCTL_PANTHOR_VM_CREATE =
		DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
	DRM_IOCTL_PANTHOR_VM_DESTROY =
		DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
	DRM_IOCTL_PANTHOR_VM_BIND =
		DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
	DRM_IOCTL_PANTHOR_VM_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
	DRM_IOCTL_PANTHOR_BO_CREATE =
		DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
	DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
		DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
	DRM_IOCTL_PANTHOR_GROUP_CREATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
	DRM_IOCTL_PANTHOR_GROUP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
	DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
		DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
	DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
	DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
	DRM_IOCTL_PANTHOR_BO_SET_LABEL =
		DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
	DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
		DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
};

#if defined(__cplusplus)
}
#endif

#endif /* _PANTHOR_DRM_H_ */