/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 * Sections in this file are organized as follows:
 *   1. IOCTL definition
 *   2. Extension definition and helper structs
 *   3. IOCTL's Query structs in the order of the Query's entries.
 *   4. The rest of IOCTL structs in the order of IOCTL declaration.
 */

/**
 * DOC: Xe Device Block Diagram
 *
 * The diagram below represents a high-level simplification of a discrete
 * GPU supported by the Xe driver. It shows some device components which
 * are necessary to understand this API, as well as how they relate to
 * each other. This diagram does not represent real hardware::
 *
 * ┌──────────────────────────────────────────────────────────────────┐
 * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
 * │ │        ┌───────────────────────┐   ┌─────┐       │ │ ┌─────┐ │ │
 * │ │        │         VRAM0         ├───┤ ... │       │ │ │VRAM1│ │ │
 * │ │        └───────────┬───────────┘   └─GT1─┘       │ │ └──┬──┘ │ │
 * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
 * │ │ │ ┌─────────────────────┐  ┌─────────────────┐ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │RCS0 │ │BCS0 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VCS0 │ │VCS1 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │CCS0 │ │CCS1 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ └─────────DSS─────────┘  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │                          │ │CCS2 │ │CCS3 │ │ │ │ │ │     │ │ │
 * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ... │ │ ... │ │ ... │  │                 │ │ │ │ │     │ │ │
 * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘  └─────Engines─────┘ │ │ │ │     │ │ │
 * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
 * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
 * └─────────────────────────────Device0───────┬──────────────────────┘
 *                                             │
 *                      ───────────────────────┴────────── PCI bus
 */

/**
 * DOC: Xe uAPI Overview
 *
 * This section aims to describe Xe's IOCTL entries, their structs, and other
 * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
 * entries and usage.
 *
 * List of supported IOCTLs:
 *  - &DRM_IOCTL_XE_DEVICE_QUERY
 *  - &DRM_IOCTL_XE_GEM_CREATE
 *  - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *  - &DRM_IOCTL_XE_VM_CREATE
 *  - &DRM_IOCTL_XE_VM_DESTROY
 *  - &DRM_IOCTL_XE_VM_BIND
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *  - &DRM_IOCTL_XE_EXEC
 *  - &DRM_IOCTL_XE_WAIT_USER_FENCE
 *  - &DRM_IOCTL_XE_OBSERVATION
 *  - &DRM_IOCTL_XE_MADVISE
 *  - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
 */

/*
 * Xe specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY			0x00
#define DRM_XE_GEM_CREATE			0x01
#define DRM_XE_GEM_MMAP_OFFSET			0x02
#define DRM_XE_VM_CREATE			0x03
#define DRM_XE_VM_DESTROY			0x04
#define DRM_XE_VM_BIND				0x05
#define DRM_XE_EXEC_QUEUE_CREATE		0x06
#define DRM_XE_EXEC_QUEUE_DESTROY		0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY		0x08
#define DRM_XE_EXEC				0x09
#define DRM_XE_WAIT_USER_FENCE			0x0a
#define DRM_XE_OBSERVATION			0x0b
#define DRM_XE_MADVISE				0x0c
#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS		0x0d

/* Must be kept compact -- no holes */

#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)

/**
 * DOC: Xe IOCTL Extensions
 *
 * Before detailing the IOCTLs and their structs, it is important to highlight
 * that every IOCTL in Xe is extensible.
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_user_extension ext3 {
 *         .next_extension = 0, // end
 *         .name = ...,
 *     };
 *     struct drm_xe_user_extension ext2 {
 *         .next_extension = (uintptr_t)&ext3,
 *         .name = ...,
 *     };
 *     struct drm_xe_user_extension ext1 {
 *         .next_extension = (uintptr_t)&ext2,
 *         .name = ...,
 *     };
 *
 * Typically the struct drm_xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 */

/**
 * struct drm_xe_user_extension - Base class for defining a chain of extensions
 */
struct drm_xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct drm_xe_user_extension, or zero if the end.
	 */
	__u64 next_extension;

	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct drm_xe_user_extension.
	 */
	__u32 name;

	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

/**
 * struct drm_xe_ext_set_property - Generic set property extension
 *
 * A generic struct that allows any of Xe's IOCTLs to be extended
 * with a set_property operation.
 */
struct drm_xe_ext_set_property {
	/** @base: base user extension */
	struct drm_xe_user_extension base;

	/** @property: property to set */
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
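
/*
 * Example: a minimal sketch (not part of the uAPI) of chaining a generic
 * set_property extension into an IOCTL that takes an @extensions pointer,
 * here an exec queue creation. The priority value 0 is an assumption
 * standing in for "normal priority"; valid names and values depend on the
 * specific IOCTL being extended.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_ext_set_property ext = {
 *         .base.next_extension = 0, // end of the chain
 *         .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *         .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *         .value = 0, // assumed normal priority
 *     };
 *     struct drm_xe_exec_queue_create create = {
 *         .extensions = (uintptr_t)&ext, // head of the chain
 *         // remaining members set up as usual
 *     };
 */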

/**
 * struct drm_xe_engine_class_instance - instance of an engine class
 *
 * It is returned as part of the @drm_xe_engine, but it is also used as
 * the input of engine selection for both @drm_xe_exec_queue_create and
 * @drm_xe_query_engine_cycles.
 *
 * The @engine_class can be:
 *  - %DRM_XE_ENGINE_CLASS_RENDER
 *  - %DRM_XE_ENGINE_CLASS_COPY
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
 *  - %DRM_XE_ENGINE_CLASS_COMPUTE
 *  - %DRM_XE_ENGINE_CLASS_VM_BIND - A kernel-only class (not an actual
 *    hardware engine class). Used for creating ordered queues of VM
 *    bind operations.
 */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
#define DRM_XE_ENGINE_CLASS_VM_BIND		5
	/** @engine_class: engine class id */
	__u16 engine_class;
	/** @engine_instance: engine instance id */
	__u16 engine_instance;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad;
};

/**
 * struct drm_xe_engine - describe hardware engine
 */
struct drm_xe_engine {
	/** @instance: The @drm_xe_engine_class_instance */
	struct drm_xe_engine_class_instance instance;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_query_engines - describe engines
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses struct
 * @drm_xe_query_engines in .data, which contains an array of struct
 * @drm_xe_engine.
 */
struct drm_xe_query_engines {
	/** @num_engines: number of engines returned in @engines */
	__u32 num_engines;
	/** @pad: MBZ */
	__u32 pad;
	/** @engines: The returned engines for this device */
	struct drm_xe_engine engines[];
};

/**
 * enum drm_xe_memory_class - Supported memory classes.
 */
enum drm_xe_memory_class {
	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
	/**
	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
	 * represents the memory that is local to the device, which we
	 * call VRAM. Not valid on integrated platforms.
	 */
	DRM_XE_MEM_REGION_CLASS_VRAM
};

/**
 * struct drm_xe_mem_region - Describes a memory region as known to
 * the driver.
 */
struct drm_xe_mem_region {
	/**
	 * @mem_class: The memory class describing this region.
	 *
	 * See enum drm_xe_memory_class for supported values.
	 */
	__u16 mem_class;
	/**
	 * @instance: The unique ID for this region, which serves as the
	 * index in the placement bitmask used as argument for
	 * &DRM_IOCTL_XE_GEM_CREATE
	 */
	__u16 instance;
	/**
	 * @min_page_size: Min page-size in bytes for this region.
	 *
	 * When the kernel allocates memory for this region, the
	 * underlying pages will be at least @min_page_size in size.
	 * Buffer objects with an allowable placement in this region must be
	 * created with a size aligned to this value.
	 * GPU virtual address mappings of (parts of) buffer objects that
	 * may be placed in this region must also have their GPU virtual
	 * address and range aligned to this value.
	 * Affected IOCTLs will return %-EINVAL if alignment restrictions are
	 * not met.
	 */
	__u32 min_page_size;
	/**
	 * @total_size: The usable size in bytes for this region.
	 */
	__u64 total_size;
	/**
	 * @used: Estimate of the memory used in bytes for this region.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero.
	 */
	__u64 used;
	/**
	 * @cpu_visible_size: How much of this region can be CPU
	 * accessed, in bytes.
	 *
	 * This will always be <= @total_size, and the remainder (if
	 * any) will not be CPU accessible. If the CPU accessible part
	 * is smaller than @total_size then this is referred to as a
	 * small BAR system.
	 *
	 * On systems without small BAR (full BAR), the @cpu_visible_size
	 * will always equal the @total_size, since all of it will be CPU
	 * accessible.
	 *
	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
	 * regions (for other types the value here will always equal
	 * zero).
	 */
	__u64 cpu_visible_size;
	/**
	 * @cpu_visible_used: Estimate of CPU visible memory used, in
	 * bytes.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero. Note this is only currently tracked for
	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
	 * here will always be zero).
	 */
	__u64 cpu_visible_used;
	/** @reserved: Reserved */
	__u64 reserved[6];
};

/**
 * struct drm_xe_query_mem_regions - describe memory regions
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
 * struct drm_xe_query_mem_regions in .data.
 */
struct drm_xe_query_mem_regions {
	/** @num_mem_regions: number of memory regions returned in @mem_regions */
	__u32 num_mem_regions;
	/** @pad: MBZ */
	__u32 pad;
	/** @mem_regions: The returned memory regions for this device */
	struct drm_xe_mem_region mem_regions[];
};
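
/*
 * Example: a sketch (not part of the uAPI) of the usual two-call query
 * pattern applied to %DRM_XE_DEVICE_QUERY_MEM_REGIONS, printing the
 * instance, class and size of each region. Error handling is omitted.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_mem_regions *regions;
 *     struct drm_xe_device_query query = {
 *         .extensions = 0,
 *         .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
 *         .size = 0,
 *         .data = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); // fills query.size
 *     regions = malloc(query.size);
 *     query.data = (uintptr_t)regions;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     for (int i = 0; i < regions->num_mem_regions; i++)
 *         printf("region instance %u: class %u, %llu bytes\n",
 *                regions->mem_regions[i].instance,
 *                regions->mem_regions[i].mem_class,
 *                regions->mem_regions[i].total_size);
 *     free(regions);
 */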

/**
 * struct drm_xe_query_config - describe the device configuration
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
 * struct drm_xe_query_config in .data.
 *
 * The index in @info can be:
 *  - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
 *    and the device revision (next 8 bits)
 *  - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
 *    configuration, see list below
 *
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
 *      has usable VRAM
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
 *      has low latency hint support
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
 *      device has CPU address mirroring support
 *  - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
 *    required by this device, typically SZ_4K or SZ_64K
 *  - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
 *  - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
 *    available exec queue priority
 */
struct drm_xe_query_config {
	/** @num_params: number of parameters returned in info */
	__u32 num_params;

	/** @pad: MBZ */
	__u32 pad;

#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID		0
#define DRM_XE_QUERY_CONFIG_FLAGS			1
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM		(1 << 0)
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY	(1 << 1)
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR	(1 << 2)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT		2
#define DRM_XE_QUERY_CONFIG_VA_BITS			3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	4
	/** @info: array of elements containing the config info */
	__u64 info[];
};
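
/*
 * Example: a sketch (not part of the uAPI) of unpacking the config @info
 * array, assuming it was retrieved with the usual two-call
 * %DRM_XE_DEVICE_QUERY_CONFIG pattern shown for the other queries.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_config *config; // filled by the query
 *     __u16 dev_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
 *     __u8 rev = (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff;
 *     __u64 flags = config->info[DRM_XE_QUERY_CONFIG_FLAGS];
 *     int has_vram = !!(flags & DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);
 *     __u64 alignment = config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT];
 */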

/**
 * struct drm_xe_gt - describe an individual GT.
 *
 * To be used with drm_xe_query_gt_list, which returns a list with the
 * individual descriptions of all existing GTs.
 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
 * implementing graphics and/or media operations.
 *
 * The index in @type can be:
 *  - %DRM_XE_QUERY_GT_TYPE_MAIN
 *  - %DRM_XE_QUERY_GT_TYPE_MEDIA
 */
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN	0
#define DRM_XE_QUERY_GT_TYPE_MEDIA	1
	/** @type: GT type: Main or Media */
	__u16 type;
	/** @tile_id: Tile ID where this GT lives (Information only) */
	__u16 tile_id;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad[3];
	/** @reference_clock: A clock frequency for timestamp */
	__u32 reference_clock;
	/**
	 * @near_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are nearest to the current engines
	 * of this GT.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 near_mem_regions;
	/**
	 * @far_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are far from the engines of this GT.
	 * In general, they have extra indirections when compared to the
	 * @near_mem_regions. For a discrete device this could mean system
	 * memory and memory living in a different tile.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 far_mem_regions;
	/** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */
	__u16 ip_ver_major;
	/** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */
	__u16 ip_ver_minor;
	/** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */
	__u16 ip_ver_rev;
	/** @pad2: MBZ */
	__u16 pad2;
	/** @reserved: Reserved */
	__u64 reserved[7];
};

/**
 * struct drm_xe_query_gt_list - A list with GT description items.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
 * drm_xe_query_gt_list in .data.
 */
struct drm_xe_query_gt_list {
	/** @num_gt: number of GT items returned in gt_list */
	__u32 num_gt;
	/** @pad: MBZ */
	__u32 pad;
	/** @gt_list: The GT list returned for this device */
	struct drm_xe_gt gt_list[];
};

/**
 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
 *
 * This is the hardware topology which reflects the internal physical
 * structure of the GPU.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
 * struct drm_xe_query_topology_mask in .data.
 *
 * The @type can be:
 *  - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
 *    (DSS) available for geometry operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_GEOMETRY    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for geometry.
 *  - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
 *    (DSS) available for compute operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_COMPUTE    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for compute.
 *  - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type
 *    may be omitted if the driver is unable to query the mask from the
 *    hardware.
 *  - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
 *    available per Dual Sub Slice (DSS). For example a query response
 *    containing the following in mask:
 *    ``EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 SIMD8 EUs. This type may be omitted if the device
 *    doesn't have SIMD8 EUs.
 *  - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
 *    Units (EU) available per Dual Sub Slice (DSS). For example a query
 *    response containing the following in mask:
 *    ``SIMD16_EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 SIMD16 EUs. This type may be omitted if the device
 *    doesn't have SIMD16 EUs.
 */
struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

#define DRM_XE_TOPO_DSS_GEOMETRY	1
#define DRM_XE_TOPO_DSS_COMPUTE		2
#define DRM_XE_TOPO_L3_BANK		3
#define DRM_XE_TOPO_EU_PER_DSS		4
#define DRM_XE_TOPO_SIMD16_EU_PER_DSS	5
	/** @type: type of mask */
	__u16 type;

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};
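
/*
 * Example: a sketch (not part of the uAPI) of walking a topology query
 * reply, assuming the reply buffer (here ``data``, a __u8 pointer of
 * query.size bytes) is a concatenation of variable-size records, each a
 * struct drm_xe_query_topology_mask immediately followed by its
 * @num_bytes mask.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_topology_mask *topo;
 *     for (__u8 *p = data; p < data + query.size;
 *          p += sizeof(*topo) + topo->num_bytes) {
 *         topo = (struct drm_xe_query_topology_mask *)p;
 *         printf("gt %u type %u: %u mask bytes\n",
 *                topo->gt_id, topo->type, topo->num_bytes);
 *     }
 */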

/**
 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
 *
 * If a query is made with a struct drm_xe_device_query where .query is equal to
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
 * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
 * .data points to this allocated structure.
 *
 * The query returns the engine cycles, which, along with the GT's @reference_clock,
 * can be used to calculate the engine timestamp. In addition the
 * query returns a set of CPU timestamps that indicate when the command
 * streamer cycle count was captured.
 */
struct drm_xe_query_engine_cycles {
	/**
	 * @eci: This is input by the user and is the engine for which command
	 * streamer cycles is queried.
	 */
	struct drm_xe_engine_class_instance eci;

	/**
	 * @clockid: This is input by the user and is the reference clock id for
	 * CPU timestamp. For definition, see clock_gettime(2) and
	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
	 */
	__s32 clockid;

	/** @width: Width of the engine cycle counter in bits. */
	__u32 width;

	/**
	 * @engine_cycles: Engine cycles as read from its register
	 * at 0x358 offset.
	 */
	__u64 engine_cycles;

	/**
	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
	 * reading the engine_cycles register using the reference clockid set by the
	 * user.
	 */
	__u64 cpu_timestamp;

	/**
	 * @cpu_delta: Time delta in ns captured around reading the lower dword
	 * of the engine_cycles register.
	 */
	__u64 cpu_delta;
};
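
/*
 * Example: a sketch (not part of the uAPI) of converting the returned
 * engine cycles to nanoseconds, assuming @reference_clock is in Hz and
 * masking the counter to its reported @width.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_engine_cycles *ec; // filled by the query
 *     __u64 mask = ec->width < 64 ? (1ull << ec->width) - 1 : ~0ull;
 *     __u64 cycles = ec->engine_cycles & mask;
 *     // reference_clock comes from the matching struct drm_xe_gt
 *     __u64 gpu_ns = cycles * 1000000000ull / gt->reference_clock;
 *     // gpu_ns can now be correlated against ec->cpu_timestamp
 */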

/**
 * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
 *
 * Given a uc_type this will return the branch, major, minor and patch version
 * of the micro-controller firmware.
 */
struct drm_xe_query_uc_fw_version {
	/** @uc_type: The micro-controller type to query firmware version */
#define XE_QUERY_UC_TYPE_GUC_SUBMISSION	0
#define XE_QUERY_UC_TYPE_HUC		1
	__u16 uc_type;

	/** @pad: MBZ */
	__u16 pad;

	/** @branch_ver: branch uc fw version */
	__u32 branch_ver;
	/** @major_ver: major uc fw version */
	__u32 major_ver;
	/** @minor_ver: minor uc fw version */
	__u32 minor_ver;
	/** @patch_ver: patch uc fw version */
	__u32 patch_ver;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved;
};

/**
 * struct drm_xe_query_pxp_status - query if PXP is ready
 *
 * If PXP is enabled and no fatal error has occurred, the status will be set to
 * one of the following values:
 * 0: PXP init still in progress
 * 1: PXP init complete
 *
 * If PXP is not enabled or something has gone wrong, the query will fail
 * with one of the following error codes:
 * -ENODEV: PXP not supported or disabled;
 * -EIO: fatal error occurred during init, so PXP will never be enabled;
 * -EINVAL: incorrect value provided as part of the query;
 * -EFAULT: error copying the memory between kernel and userspace.
 *
 * The status can only be 0 in the first few seconds after driver load. If
 * everything works as expected, the status will transition to init complete in
 * less than 1 second, while in case of errors the driver might take longer to
 * start returning an error code, but it should still take less than 10 seconds.
 *
 * The supported session type bitmask is based on the values in
 * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore
 * is not reported in the bitmask.
 */
struct drm_xe_query_pxp_status {
	/** @status: current PXP status */
	__u32 status;

	/** @supported_session_types: bitmask of supported PXP session types */
	__u32 supported_session_types;
};
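
/*
 * Example: a sketch (not part of the uAPI) of polling the PXP status
 * before creating a PXP-protected queue. It assumes the two-call
 * %DRM_XE_DEVICE_QUERY_PXP_STATUS convention with a caller-allocated
 * struct whose size matches the required size.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_pxp_status pxp = {};
 *     struct drm_xe_device_query query = {
 *         .query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
 *         .size = sizeof(pxp),
 *         .data = (uintptr_t)&pxp,
 *     };
 *     int err;
 *     do {
 *         err = ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *         // consider sleeping briefly between attempts
 *     } while (!err && pxp.status == 0); // 0 == init still in progress
 *     // err < 0: PXP unavailable; pxp.status == 1: ready
 */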

/**
 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
 * structure to query device information
 *
 * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
 * and sets the value in the query member. This determines the type of
 * the structure provided by the driver in data, among struct drm_xe_query_*.
 *
 * The @query can be:
 *  - %DRM_XE_DEVICE_QUERY_ENGINES
 *  - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
 *  - %DRM_XE_DEVICE_QUERY_CONFIG
 *  - %DRM_XE_DEVICE_QUERY_GT_LIST
 *  - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
 *    configuration of the device such as information on slices, memory,
 *    caches, and so on. It is provided as a table of key / value
 *    attributes.
 *  - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 *  - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
 *  - %DRM_XE_DEVICE_QUERY_UC_FW_VERSION
 *  - %DRM_XE_DEVICE_QUERY_OA_UNITS
 *  - %DRM_XE_DEVICE_QUERY_PXP_STATUS
 *  - %DRM_XE_DEVICE_QUERY_EU_STALL
 *
 * If size is set to 0, the driver fills it with the required size for
 * the requested type of data to query. If size is equal to the required
 * size, the queried information is copied into data. If size is set to
 * a value different from 0 and different from the required size, the
 * IOCTL call returns -EINVAL.
 *
 * For example the following code snippet allows retrieving and printing
 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_engines *engines;
 *     struct drm_xe_device_query query = {
 *         .extensions = 0,
 *         .query = DRM_XE_DEVICE_QUERY_ENGINES,
 *         .size = 0,
 *         .data = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     engines = malloc(query.size);
 *     query.data = (uintptr_t)engines;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     for (int i = 0; i < engines->num_engines; i++) {
 *         printf("Engine %d: %s\n", i,
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_COPY ? "COPY":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
 *             engines->engines[i].instance.engine_class ==
 *                 DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
 *             "UNKNOWN");
 *     }
 *     free(engines);
 */
struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_DEVICE_QUERY_ENGINES		0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS		1
#define DRM_XE_DEVICE_QUERY_CONFIG		2
#define DRM_XE_DEVICE_QUERY_GT_LIST		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG		4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY		5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES	6
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION	7
#define DRM_XE_DEVICE_QUERY_OA_UNITS		8
#define DRM_XE_DEVICE_QUERY_PXP_STATUS		9
#define DRM_XE_DEVICE_QUERY_EU_STALL		10
	/** @query: The type of data to query */
	__u32 query;

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
 * gem creation
 *
 * The @flags can be:
 *  - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
 *    allocation strategy by deferring physical memory allocation
 *    until the object is either bound to a virtual memory region via
 *    VM_BIND or accessed by the CPU. As a result, no backing memory is
 *    reserved at the time of GEM object creation.
 *  - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
 *  - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
 *    possible placement, ensure that the corresponding VRAM allocation
 *    will always use the CPU accessible part of VRAM. This is important
 *    for small-bar systems (on full-bar systems this gets turned into a
 *    noop).
 *    Note 1: System memory can be used as an extra placement if the kernel
 *    should spill the allocation to system memory, if space can't be made
 *    available in the CPU accessible part of VRAM (giving the same
 *    behaviour as the i915 interface, see
 *    I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
 *    Note 2: For clear-color CCS surfaces the kernel needs to read the
 *    clear-color value stored in the buffer, and on discrete platforms we
 *    need to use VRAM for display surfaces, therefore the kernel requires
 *    setting this flag for such objects, otherwise an error is returned on
 *    small-bar systems.
 *
 * @cpu_caching supports the following values:
 *  - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
 *    caching. On iGPU this can't be used for scanout surfaces. Currently
 *    not allowed for objects placed in VRAM.
 *  - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
 *    is uncached. Scanout surfaces should likely use this. All objects
 *    that can be placed in VRAM must use this.
 *
 * This ioctl supports setting the following properties via the
 * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the
 * generic @drm_xe_ext_set_property struct:
 *
 *  - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
 *    this object will be used with. Valid values are listed in enum
 *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
 *    there is no need to explicitly set that. Objects used with session of type
 *    %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
 *    event occurs after their creation. Attempting to flip an invalid object
 *    will cause a black frame to be displayed instead. Submissions with invalid
 *    objects mapped in the VM will be rejected.
 */
struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY	0
#define   DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE	0
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Size of the object to be created, must match region
	 * (system or vram) minimum alignment (&min_page_size).
	 */
	__u64 size;

	/**
	 * @placement: A mask of memory instances of where BO can be placed.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u32 placement;

#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING		(1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT			(1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(1 << 2)
	/**
	 * @flags: Creation flags, a combination of the
	 * DRM_XE_GEM_CREATE_FLAG_* values defined above
	 */
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *  2. Cannot be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

#define DRM_XE_GEM_CPU_CACHING_WB	1
#define DRM_XE_GEM_CPU_CACHING_WC	2
	/**
	 * @cpu_caching: The CPU caching mode to select for this object. If
	 * mmapping the object the mode selected here will also be used. The
	 * exception is when mapping system memory (including data evicted
	 * to system) on discrete GPUs. The caching mode selected will
	 * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency
	 * between GPU- and CPU is guaranteed. The caching mode of
	 * existing CPU-mappings will be updated transparently to
	 * user-space clients.
	 */
	__u16 cpu_caching;
	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};
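
/*
 * Example: a sketch (not part of the uAPI) of creating a 64 KiB object
 * placeable in memory region instance 0, assuming that instance was found
 * via %DRM_XE_DEVICE_QUERY_MEM_REGIONS and that 64 KiB satisfies its
 * @min_page_size. Error handling is omitted.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_gem_create create = {
 *         .size = 0x10000, // 64 KiB
 *         .placement = 1 << 0, // one bit per mem region instance
 *         .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *     // create.handle now holds the (nonzero) GEM handle
 */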

/**
 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *
 * The @flags can be:
 *  - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - Allows the user to query a special
 *    offset for use in the mmap ioctl. Writing to the returned mmap address will
 *    generate a PCI memory barrier with low overhead (avoiding an IOCTL call as
 *    well as writing to VRAM which would also add overhead), acting like an
 *    MI_MEM_FENCE instruction.
 *
 * Note: The mmap size can be at most 4K, due to HW limitations. As a result
 * this interface is only supported on CPU architectures that support 4K page
 * size. The mmap_offset ioctl will detect this and gracefully return an
 * error, where userspace is expected to have a different fallback method for
 * triggering a barrier.
 *
 * Roughly the usage would be as follows:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_gem_mmap_offset mmo = {
 *         .handle = 0, // must be set to 0
 *         .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
 *     };
 *
 *     err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *     map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
 *     map[i] = 0xdeadbeef; // issue barrier
 */
struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

#define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER	(1 << 0)
	/** @flags: Flags */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
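
/*
 * Example: a sketch (not part of the uAPI) of the regular (non-barrier)
 * use of this ioctl, mapping an existing GEM object through the fake
 * offset it returns. BO_SIZE is assumed to be the object's size.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_gem_mmap_offset mmo = {
 *         .handle = create.handle, // from DRM_IOCTL_XE_GEM_CREATE
 *         .flags = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *     void *map = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, mmo.offset);
 */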

/**
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
 *  - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
 *    space of the VM to the scratch page. A vm_bind would overwrite the scratch
 *    page mapping. This flag is mutually exclusive with the
 *    %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, except on Xe2 and Xe3 platforms.
 *  - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
 *    exec submissions to its exec_queues that don't have an upper time
 *    limit on the job execution time. But exec submissions to these
 *    don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
 *    DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
 *    together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
 *    LR VMs can be created in recoverable page-fault mode using
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
 *    If that flag is omitted, the UMD cannot rely on the slightly
 *    different per-VM overcommit semantics that are enabled by
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
 *    still enable recoverable pagefaults if supported by the device.
 *  - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
 *    DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
 *    demand when accessed, and also allows per-VM overcommit of memory.
 *    The xe driver internally uses recoverable pagefaults to implement
 *    this.
 */
struct drm_xe_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE	(1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE		(1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
	/** @flags: Flags */
	__u32 flags;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
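
/*
 * Example: a sketch (not part of the uAPI) of creating a VM with scratch
 * pages enabled and later destroying it. Error handling is omitted.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_vm_create create = {
 *         .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *     // ... bind objects and submit work against create.vm_id ...
 *     struct drm_xe_vm_destroy destroy = { .vm_id = create.vm_id };
 *     ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
 */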

/**
 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
 */
struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_bind_op - run bind operations
 *
 * The @op can be:
 *  - %DRM_XE_VM_BIND_OP_MAP
 *  - %DRM_XE_VM_BIND_OP_UNMAP
 *  - %DRM_XE_VM_BIND_OP_MAP_USERPTR
 *  - %DRM_XE_VM_BIND_OP_UNMAP_ALL
 *  - %DRM_XE_VM_BIND_OP_PREFETCH
 *
 * and the @flags can be:
 *  - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
 *    to ensure write protection
 *  - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
 *    MAP operation immediately rather than deferring the MAP to the page
 *    fault handler. This is implied on a non-faulting VM as there is no
 *    fault handler to defer to.
 *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
 *    tables are setup with a special bit which indicates writes are
 *    dropped and all reads return zero. In the future, the NULL flag will
 *    only be valid for DRM_XE_VM_BIND_OP_MAP operations, with the BO
 *    handle MBZ and the BO offset MBZ. This flag is intended to
 *    implement VK sparse bindings.
 *  - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
 *    reject the binding if the encryption key is no longer valid. This
 *    flag has no effect on BOs that are not marked as using PXP.
 *  - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
 *    set, no mappings are created; rather, the range is reserved for CPU address
 *    mirroring, which will be populated on GPU page faults or prefetches. Only
 *    valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
 *    mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations; the BO
 *    handle MBZ and the BO offset MBZ.
 *
 * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
 *  - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
 *    the memory region advised by madvise.
 */
struct drm_xe_vm_bind_op {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	/**
	 * @pat_index: The platform defined @pat_index to use for this mapping.
	 * The index basically maps to some predefined memory attributes,
	 * including things like caching, coherency, compression etc. The exact
	 * meaning of the pat_index is platform specific and defined in the
	 * Bspec and PRMs. When the KMD sets up the binding the index here is
	 * encoded into the ppGTT PTE.
	 *
	 * For coherency the @pat_index needs to be at least 1way coherent when
	 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
	 * will extract the coherency mode from the @pat_index and reject if
	 * there is a mismatch (see note below for pre-MTL platforms).
	 *
	 * Note: On pre-MTL platforms there is only a caching mode and no
	 * explicit coherency mode, but on such hardware there is always a
	 * shared-LLC (or the device is a dGPU) so all GT memory accesses are
	 * coherent with CPU caches even with the caching mode set as uncached.
	 * It's only the display engine that is incoherent (on dgpu it must be
	 * in VRAM which is always mapped as WC on the CPU). However to keep
	 * the uapi somewhat consistent with newer platforms the KMD groups the
	 * different cache levels into the following coherency buckets on all
	 * pre-MTL platforms:
	 *
	 *	ppGTT UC -> COH_NONE
	 *	ppGTT WC -> COH_NONE
	 *	ppGTT WT -> COH_NONE
	 *	ppGTT WB -> COH_AT_LEAST_1WAY
	 *
	 * In practice UC/WC/WT should only ever be used for scanout surfaces on
	 * such platforms (or perhaps in general for dma-buf if shared with
	 * another device) since it is only the display engine that is actually
	 * incoherent. Everything else should typically use WB given that we
	 * have a shared-LLC. On MTL+ this completely changes and the HW
	 * defines the coherency mode as part of the @pat_index, where
	 * incoherent GT access is possible.
	 *
	 * Note: For userptr and externally imported dma-buf the kernel expects
	 * either 1WAY or 2WAY for the @pat_index.
	 *
	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
	 * on the @pat_index. For such mappings there is no actual memory being
	 * mapped (the address in the PTE is invalid), so the various PAT memory
	 * attributes likely do not apply. Simply leaving as zero is one
	 * option (still a valid pat_index). Same applies to
	 * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mappings
	 * there is no actual memory being mapped.
	 */
	__u16 pat_index;

	/** @pad: MBZ */
	__u16 pad;

	union {
		/**
		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
		 * ignored for unbind
		 */
		__u64 obj_offset;

		/** @userptr: user pointer to bind on */
		__u64 userptr;

		/**
		 * @cpu_addr_mirror_offset: Offset from GPU @addr to create
		 * CPU address mirror mappings. MBZ with current level of
		 * support (i.e. only a 1:1 mapping between GPU and CPU
		 * mappings is supported).
		 */
		__s64 cpu_addr_mirror_offset;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

#define DRM_XE_VM_BIND_OP_MAP		0x0
#define DRM_XE_VM_BIND_OP_UNMAP		0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
#define DRM_XE_VM_BIND_OP_PREFETCH	0x4
	/** @op: Bind operation to perform */
	__u32 op;

#define DRM_XE_VM_BIND_FLAG_READONLY		(1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE		(1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL		(1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE		(1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP		(1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR	(1 << 5)
	/** @flags: Bind flags */
	__u32 flags;

#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC	-1
	/**
	 * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
	 * It is a region instance, not a mask.
	 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
	 */
	__u32 prefetch_mem_region_instance;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
 *
 * Below is an example of a minimal use of @drm_xe_vm_bind to
 * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to
 * illustrate `userptr`. It can be synchronized by using the example
 * provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *     data = aligned_alloc(ALIGNMENT, BO_SIZE);
 *     struct drm_xe_vm_bind bind = {
 *         .vm_id = vm,
 *         .num_binds = 1,
 *         .bind.obj = 0,
 *         .bind.obj_offset = to_user_pointer(data),
 *         .bind.range = BO_SIZE,
 *         .bind.addr = BIND_ADDRESS,
 *         .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
 *         .bind.flags = 0,
 *         .num_syncs = 1,
 *         .syncs = &sync,
 *         .exec_queue_id = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */
struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
	 * and exec queue must have same vm_id. If zero, the default VM bind engine
	 * is used.
	 */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;

		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @pad2: MBZ */
	__u32 pad2;

	/** @num_syncs: number of syncs to wait on */
	__u32 num_syncs;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
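
/*
 * Example: a sketch (not part of the uAPI) of submitting two bind
 * operations in one call through @vector_of_binds. The bo_handle, ADDR1
 * and ADDR2 names are assumptions; the array lives in user memory and is
 * read during the IOCTL.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_vm_bind_op ops[2] = {
 *         { .obj = bo_handle, .range = BO_SIZE, .addr = ADDR1,
 *           .op = DRM_XE_VM_BIND_OP_MAP },
 *         { .range = BO_SIZE, .addr = ADDR2,
 *           .op = DRM_XE_VM_BIND_OP_MAP, .flags = DRM_XE_VM_BIND_FLAG_NULL },
 *     };
 *     struct drm_xe_vm_bind bind = {
 *         .vm_id = vm,
 *         .num_binds = 2,
 *         .vector_of_binds = (uintptr_t)ops,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */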

/**
 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *
 * This ioctl supports setting the following properties via the
 * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
 * generic @drm_xe_ext_set_property struct:
 *
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
 *    CAP_SYS_NICE is required to set a value above normal.
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
 *    duration in microseconds.
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
 *    this queue will be used with. Valid values are listed in enum
 *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
 *    there is no need to explicitly set that. When a queue of type
 *    %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
 *    (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already running.
 *    The user is expected to query the PXP status via the query ioctl (see
 *    %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before
 *    attempting to create a queue with this property. When a queue is created
 *    before PXP is ready, the ioctl will return -EBUSY if init is still in
 *    progress or -EIO if init failed.
 *    Given that going into a power-saving state kills PXP HWDRM sessions,
 *    runtime PM will be blocked while queues of this type are alive.
 *    All PXP queues will be killed if a PXP invalidation event occurs.
 *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_engine_class_instance instance = {
 *         .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *     };
 *     struct drm_xe_exec_queue_create exec_queue_create = {
 *          .extensions = 0,
 *          .vm_id = vm,
 *          .width = 1,
 *          .num_placements = 1,
 *          .instances = to_user_pointer(&instance),
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 *
 * Users can provide a hint to the kernel for cases demanding a low-latency
 * profile. Please note it may have an impact on power consumption. The
 * low-latency hint is indicated with a flag while creating the exec queue,
 * as shown below:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_exec_queue_create exec_queue_create = {
 *          .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
 *          .extensions = 0,
 *          .vm_id = vm,
 *          .width = 1,
 *          .num_placements = 1,
 *          .instances = to_user_pointer(&instance),
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY	0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY	0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE	1
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE	2
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @width: submission width (number BB per exec) for this exec queue */
	__u16 width;

	/** @num_placements: number of valid placements for this exec queue */
	__u16 num_placements;

	/** @vm_id: VM to use for this exec queue */
	__u32 vm_id;

#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT	(1 << 0)
	/** @flags: flags to use for this exec queue */
	__u32 flags;

	/** @exec_queue_id: Returned exec queue ID */
	__u32 exec_queue_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
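
/*
 * Example: a sketch (not part of the uAPI) of a width-1 queue balanced
 * over two placements, here assumed to be two video decode engine
 * instances on GT 0. With width = 1 and num_placements = 2, the
 * @instances array holds the entries for j = 0..1 at index = j + i * width.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_engine_class_instance instances[2] = {
 *         { .engine_class = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
 *           .engine_instance = 0, .gt_id = 0 },
 *         { .engine_class = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
 *           .engine_instance = 1, .gt_id = 0 },
 *     };
 *     struct drm_xe_exec_queue_create create = {
 *         .vm_id = vm,
 *         .width = 1,
 *         .num_placements = 2,
 *         .instances = (uintptr_t)instances,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 */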

/**
 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 */
struct drm_xe_exec_queue_destroy {
	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *
 * The @property can be:
 *  - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
 */
struct drm_xe_exec_queue_get_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
	/** @property: property to get */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_sync - sync object
 *
 * The @type can be:
 *  - %DRM_XE_SYNC_TYPE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_USER_FENCE
 *
 * and the @flags can be:
 *  - %DRM_XE_SYNC_FLAG_SIGNAL
 *
 * A minimal use of @drm_xe_sync looks like this:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_sync sync = {
 *         .flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *         .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *     };
 *     struct drm_syncobj_create syncobj_create = { 0 };
 *     ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
 *     sync.handle = syncobj_create.handle;
 *     ...
 *     use of &sync in drm_xe_exec or drm_xe_vm_bind
 *     ...
 *     struct drm_syncobj_wait wait = {
 *         .handles = &sync.handle,
 *         .timeout_nsec = INT64_MAX,
 *         .count_handles = 1,
 *         .flags = 0,
 *         .first_signaled = 0,
 *         .pad = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */
struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_SYNC_TYPE_SYNCOBJ		0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE		0x2
	/** @type: Type of this sync object */
	__u32 type;

#define DRM_XE_SYNC_FLAG_SIGNAL	(1 << 0)
	/** @flags: Sync Flags */
	__u32 flags;

	union {
		/** @handle: Handle for the object */
		__u32 handle;

		/**
		 * @addr: Address of user fence. When a sync is passed in via the
		 * exec IOCTL this is a GPU address in the VM. When a sync is
		 * passed in via the VM bind IOCTL this is a user pointer. In
		 * either case, it is the user's responsibility that this address
		 * is present and mapped when the user fence is signalled. Must be
		 * qword aligned.
		 */
		__u64 addr;
	};

	/**
	 * @timeline_value: Input for the timeline sync object. Needs to be
	 * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
 *
 * This is an example to use @drm_xe_exec for execution of the object
 * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
 * (see example in @drm_xe_exec_queue_create). It can be synchronized
 * by using the example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_exec exec = {
 *         .exec_queue_id = exec_queue,
 *         .syncs = &sync,
 *         .num_syncs = 1,
 *         .address = BIND_ADDRESS,
 *         .num_batch_buffer = 1,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 */
struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID for the batch buffer */
	__u32 exec_queue_id;

	/** @num_syncs: Number of struct drm_xe_sync in array. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of batch buffer if num_batch_buffer == 1 or an
	 * array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffers in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
 *
 * Wait on user fence. XE will wake up on every HW engine interrupt in the
 * instances list and check if the user fence is complete::
 *
 *	(*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 *
 * The @op can be:
 *  - %DRM_XE_UFENCE_WAIT_OP_EQ
 *  - %DRM_XE_UFENCE_WAIT_OP_NEQ
 *  - %DRM_XE_UFENCE_WAIT_OP_GT
 *  - %DRM_XE_UFENCE_WAIT_OP_GTE
 *  - %DRM_XE_UFENCE_WAIT_OP_LT
 *  - %DRM_XE_UFENCE_WAIT_OP_LTE
 *
 * and the @flags can be:
 *  - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
 *
 * The @mask values can be for example:
 *  - 0xffu for u8
 *  - 0xffffu for u16
 *  - 0xffffffffu for u32
 *  - 0xffffffffffffffffu for u64
 */
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @addr: user pointer address to wait on, must be qword aligned
	 */
	__u64 addr;

#define DRM_XE_UFENCE_WAIT_OP_EQ	0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ	0x1
#define DRM_XE_UFENCE_WAIT_OP_GT	0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE	0x3
#define DRM_XE_UFENCE_WAIT_OP_LT	0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE	0x5
	/** @op: wait operation (type of comparison) */
	__u16 op;

#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
	/** @flags: wait flags */
	__u16 flags;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: compare value */
	__u64 value;

	/** @mask: comparison mask */
	__u64 mask;

	/**
	 * @timeout: how long to wait before bailing, value in nanoseconds.
	 * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
	 * it contains the timeout expressed in nanoseconds to wait (the fence
	 * will expire at now() + timeout).
	 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout)
	 * the wait will end at timeout (uses the system MONOTONIC_CLOCK).
	 * Passing a negative timeout leads to a never-ending wait.
	 *
	 * On relative timeout this value is updated with the timeout left
	 * (for restarting the call in case of signal delivery).
	 * On absolute timeout this value stays intact (a restarted call still
	 * expires at the same point of time).
	 */
	__s64 timeout;

	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
	__u32 exec_queue_id;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * enum drm_xe_observation_type - Observation stream types
 */
enum drm_xe_observation_type {
	/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
	DRM_XE_OBSERVATION_TYPE_OA,
	/** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
	DRM_XE_OBSERVATION_TYPE_EU_STALL,
};

/**
 * enum drm_xe_observation_op - Observation stream ops
 */
enum drm_xe_observation_op {
	/** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */
	DRM_XE_OBSERVATION_OP_STREAM_OPEN,

	/** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */
	DRM_XE_OBSERVATION_OP_ADD_CONFIG,

	/** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */
	DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
};

/**
 * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
 *
 * The observation layer enables multiplexing observation streams of
 * multiple types. The actual params for a particular stream operation are
 * supplied via the @param pointer (use __copy_from_user to get these
 * params).
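 *
 * A minimal sketch of opening an observation stream (stream_props here is
 * illustrative, assumed to point to the first struct of a stream-type
 * specific property chain such as the one shown for
 * @drm_xe_oa_property_id):
 *
 * .. code-block:: C
 *
 *     struct drm_xe_observation_param p = {
 *         .observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *         .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *         .param = (__u64)(uintptr_t)stream_props,
 *     };
 *     int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &p);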
 */
struct drm_xe_observation_param {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @observation_type: observation stream type, of enum @drm_xe_observation_type */
	__u64 observation_type;
	/** @observation_op: observation stream op, of enum @drm_xe_observation_op */
	__u64 observation_op;
	/** @param: Pointer to actual stream params */
	__u64 param;
};

/**
 * enum drm_xe_observation_ioctls - Observation stream fd ioctl's
 *
 * Information exchanged between userspace and kernel for observation fd
 * ioctl's is stream type specific
 */
enum drm_xe_observation_ioctls {
	/** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),

	/** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),

	/** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */
	DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),

	/** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */
	DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),

	/** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */
	DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
};

/**
 * enum drm_xe_oa_unit_type - OA unit types
 */
enum drm_xe_oa_unit_type {
	/**
	 * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered
	 * sub-types of OAG. For OAR/OAC, use OAG.
	 */
	DRM_XE_OA_UNIT_TYPE_OAG,

	/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
	DRM_XE_OA_UNIT_TYPE_OAM,

	/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
	DRM_XE_OA_UNIT_TYPE_OAM_SAG,
};

/**
 * struct drm_xe_oa_unit - describe OA unit
 */
struct drm_xe_oa_unit {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_unit_id: OA unit ID */
	__u32 oa_unit_id;

	/** @oa_unit_type: OA unit type of @drm_xe_oa_unit_type */
	__u32 oa_unit_type;

	/** @capabilities: OA capabilities bit-mask */
	__u64 capabilities;
#define DRM_XE_OA_CAPS_BASE (1 << 0)
#define DRM_XE_OA_CAPS_SYNCS (1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
#define DRM_XE_OA_CAPS_OAM (1 << 4)

	/** @oa_timestamp_freq: OA timestamp freq */
	__u64 oa_timestamp_freq;

	/** @reserved: MBZ */
	__u64 reserved[4];

	/** @num_engines: number of engines in @eci array */
	__u64 num_engines;

	/** @eci: engines attached to this OA unit */
	struct drm_xe_engine_class_instance eci[];
};

/**
 * struct drm_xe_query_oa_units - describe OA units
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct
 * drm_xe_query_oa_units in .data.
 *
 * OA unit properties for all OA units can be accessed using a code block
 * such as the one below:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_oa_units *qoa;
 *     struct drm_xe_oa_unit *oau;
 *     u8 *poau;
 *
 *     // malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. Then:
 *     poau = (u8 *)&qoa->oa_units[0];
 *     for (int i = 0; i < qoa->num_oa_units; i++) {
 *         oau = (struct drm_xe_oa_unit *)poau;
 *         // Access 'struct drm_xe_oa_unit' fields here
 *         poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
 *     }
 */
struct drm_xe_query_oa_units {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @num_oa_units: number of OA units returned in oau[] */
	__u32 num_oa_units;
	/** @pad: MBZ */
	__u32 pad;
	/**
	 * @oa_units: struct @drm_xe_oa_unit array returned for this device.
	 * Written below as a u64 array to avoid problems with nested flexible
	 * arrays with some compilers
	 */
	__u64 oa_units[];
};

/**
 * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec
 * 52198/60942
 */
enum drm_xe_oa_format_type {
	/** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */
	DRM_XE_OA_FMT_TYPE_OAG,
	/** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */
	DRM_XE_OA_FMT_TYPE_OAR,
	/** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */
	DRM_XE_OA_FMT_TYPE_OAM,
	/** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */
	DRM_XE_OA_FMT_TYPE_OAC,
	/** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */
	DRM_XE_OA_FMT_TYPE_OAM_MPEC,
	/** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */
	DRM_XE_OA_FMT_TYPE_PEC,
};

/**
 * enum drm_xe_oa_property_id - OA stream property id's
 *
 * Stream params are specified as a chain of @drm_xe_ext_set_property
 * struct's, with @property values from enum @drm_xe_oa_property_id and
 * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY.
 * @param field in struct @drm_xe_observation_param points to the first
 * @drm_xe_ext_set_property struct.
 *
 * Exactly the same mechanism is also used for stream reconfiguration using the
 * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a
 * subset of properties below can be specified for stream reconfiguration.
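 *
 * A minimal sketch of a two-entry property chain for stream open
 * (oa_config_id here is illustrative, assumed to have been returned by a
 * prior @DRM_XE_OBSERVATION_OP_ADD_CONFIG):
 *
 * .. code-block:: C
 *
 *     struct drm_xe_ext_set_property props[2] = {};
 *
 *     props[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *     props[0].base.next_extension = (__u64)(uintptr_t)&props[1];
 *     props[0].property = DRM_XE_OA_PROPERTY_OA_UNIT_ID;
 *     props[0].value = 0;
 *
 *     props[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *     props[1].property = DRM_XE_OA_PROPERTY_OA_METRIC_SET;
 *     props[1].value = oa_config_id;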
 */
enum drm_xe_oa_property_id {
#define DRM_XE_OA_EXTENSION_SET_PROPERTY 0
	/**
	 * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open
	 * the OA stream, see @oa_unit_id in 'struct
	 * drm_xe_query_oa_units'. Defaults to 0 if not provided.
	 */
	DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1,

	/**
	 * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw
	 * OA unit reports or stream samples in a global buffer attached to an
	 * OA unit.
	 */
	DRM_XE_OA_PROPERTY_SAMPLE_OA,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA
	 * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG.
	 */
	DRM_XE_OA_PROPERTY_OA_METRIC_SET,

	/** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */
	DRM_XE_OA_PROPERTY_OA_FORMAT,
	/*
	 * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942,
	 * in terms of the following quantities: a. enum @drm_xe_oa_format_type,
	 * b. counter select, c. counter size and d. BC report. Also refer to
	 * the oa_formats array in drivers/gpu/drm/xe/xe_oa.c.
	 */
#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16)
#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24)

	/**
	 * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit
	 * sampling with sampling frequency proportional to 2^(period_exponent + 1)
	 */
	DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA
	 * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE).
	 */
	DRM_XE_OA_PROPERTY_OA_DISABLED,

	/**
	 * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific
	 * @exec_queue_id. OA queries can be executed on this exec queue.
	 */
	DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to
	 * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID or will default to 0.
	 */
	DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE,

	/**
	 * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing
	 * to be disabled for the stream exec queue.
	 */
	DRM_XE_OA_PROPERTY_NO_PREEMPT,

	/**
	 * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array
	 * specified in @DRM_XE_OA_PROPERTY_SYNCS
	 */
	DRM_XE_OA_PROPERTY_NUM_SYNCS,

	/**
	 * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array
	 * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA
	 * configuration will wait till input fences signal. Output fences
	 * will signal after the new OA configuration takes effect. For
	 * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar
	 * to the VM bind case.
	 */
	DRM_XE_OA_PROPERTY_SYNCS,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be
	 * allocated by the driver in bytes. Supported sizes are powers of
	 * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA
	 * buffer is allocated by default.
	 */
	DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE,

	/**
	 * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait
	 * for before unblocking poll or read
	 */
	DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS,
};

/**
 * struct drm_xe_oa_config - OA metric configuration
 *
 * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A
 * particular config can be specified when opening an OA stream using the
 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property.
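 *
 * A minimal sketch of adding a config (regs here is illustrative, assumed
 * to be an array of 2 * n_regs u32 (register address, value) pairs; the
 * @uuid field must also be filled in):
 *
 * .. code-block:: C
 *
 *     struct drm_xe_oa_config cfg = {
 *         .n_regs = n_regs,
 *         .regs_ptr = (__u64)(uintptr_t)regs,
 *     };
 *     // also copy a unique config UUID string into cfg.uuid here
 *     struct drm_xe_observation_param p = {
 *         .observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *         .observation_op = DRM_XE_OBSERVATION_OP_ADD_CONFIG,
 *         .param = (__u64)(uintptr_t)&cfg,
 *     };
 *     int oa_config_id = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &p);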
 */
struct drm_xe_oa_config {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */
	char uuid[36];

	/** @n_regs: Number of regs in @regs_ptr */
	__u32 n_regs;

	/**
	 * @regs_ptr: Pointer to (register address, value) pairs for OA config
	 * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs).
	 */
	__u64 regs_ptr;
};

/**
 * struct drm_xe_oa_stream_status - OA stream status returned from
 * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can
 * call the ioctl to query stream status in response to an EIO errno from
 * observation fd read().
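 *
 * A minimal sketch of checking why read() on the stream fd failed with
 * errno == EIO:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_oa_stream_status status = {};
 *
 *     if (!ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_STATUS, &status) &&
 *         (status.oa_status & DRM_XE_OASTATUS_BUFFER_OVERFLOW)) {
 *             // e.g. consume reports faster or request a larger OA buffer
 *     }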
 */
struct drm_xe_oa_stream_status {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_status: OA stream status (see Bspec 46717/61226) */
	__u64 oa_status;
#define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL (1 << 3)
#define DRM_XE_OASTATUS_COUNTER_OVERFLOW (1 << 2)
#define DRM_XE_OASTATUS_BUFFER_OVERFLOW (1 << 1)
#define DRM_XE_OASTATUS_REPORT_LOST (1 << 0)

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

/**
 * struct drm_xe_oa_stream_info - OA stream info returned from
 * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl
 */
struct drm_xe_oa_stream_info {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_buf_size: OA buffer size */
	__u64 oa_buf_size;

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

/**
 * enum drm_xe_pxp_session_type - Supported PXP session types.
 *
 * We currently only support HWDRM sessions, which are used for protected
 * content that ends up being displayed, but the HW supports multiple types, so
 * we might extend support in the future.
 */
enum drm_xe_pxp_session_type {
	/** @DRM_XE_PXP_TYPE_NONE: PXP not used */
	DRM_XE_PXP_TYPE_NONE = 0,
	/**
	 * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
	 * up on the display.
	 */
	DRM_XE_PXP_TYPE_HWDRM = 1,
};

/* ID of the protected content session managed by Xe when PXP is active */
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf

/**
 * enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
 *
 * These properties are passed to the driver at open as a chain of
 * @drm_xe_ext_set_property structures with @property set to these
 * properties' enums and @value set to the corresponding values of these
 * properties. @drm_xe_user_extension base.name should be set to
 * @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
 *
 * With the file descriptor obtained from open, user space must enable
 * the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
 * calling read(). An EIO errno from read() indicates that the HW dropped
 * data due to a full buffer.
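 *
 * A minimal sketch of opening, enabling and reading an EU stall stream
 * (rate here is illustrative, assumed to be one of the @sampling_rates
 * reported by struct @drm_xe_query_eu_stall):
 *
 * .. code-block:: C
 *
 *     struct drm_xe_ext_set_property props[2] = {};
 *
 *     props[0].base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY;
 *     props[0].base.next_extension = (__u64)(uintptr_t)&props[1];
 *     props[0].property = DRM_XE_EU_STALL_PROP_GT_ID;
 *     props[0].value = 0;
 *     props[1].base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY;
 *     props[1].property = DRM_XE_EU_STALL_PROP_SAMPLE_RATE;
 *     props[1].value = rate;
 *
 *     struct drm_xe_observation_param p = {
 *         .observation_type = DRM_XE_OBSERVATION_TYPE_EU_STALL,
 *         .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *         .param = (__u64)(uintptr_t)props,
 *     };
 *     int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &p);
 *
 *     ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0);
 *     // read() EU stall data records from stream_fd here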
 */
enum drm_xe_eu_stall_property_id {
#define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY 0
	/**
	 * @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
	 * EU stall data will be captured.
	 */
	DRM_XE_EU_STALL_PROP_GT_ID = 1,

	/**
	 * @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
	 * GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
	 */
	DRM_XE_EU_STALL_PROP_SAMPLE_RATE,

	/**
	 * @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
	 * EU stall data reports to be present in the kernel buffer
	 * before unblocking a blocked poll or read.
	 */
	DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
};

/**
 * struct drm_xe_query_eu_stall - Information about EU stall sampling.
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
 * struct @drm_xe_query_eu_stall in .data.
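 *
 * A minimal sketch of picking the slowest supported sampling rate
 * (data here is illustrative, assumed to be the .data reply buffer):
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_eu_stall *info = data;
 *     __u64 rate = info->sampling_rates[info->num_sampling_rates - 1];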
 */
struct drm_xe_query_eu_stall {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @capabilities: EU stall capabilities bit-mask */
	__u64 capabilities;
#define DRM_XE_EU_STALL_CAPS_BASE (1 << 0)

	/** @record_size: size of each EU stall data record */
	__u64 record_size;

	/** @per_xecore_buf_size: internal per XeCore buffer size */
	__u64 per_xecore_buf_size;

	/** @reserved: Reserved */
	__u64 reserved[5];

	/** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
	__u64 num_sampling_rates;

	/**
	 * @sampling_rates: Flexible array of sampling rates
	 * sorted in the fastest to slowest order.
	 * Sampling rates are specified in GPU clock cycles.
	 */
	__u64 sampling_rates[];
};

/**
 * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
 *
 * This structure is used to set memory attributes for a virtual address range
 * in a VM. The type of attribute is specified by @type, and the corresponding
 * union member is used to provide additional parameters for @type.
 *
 * Supported attribute types:
 * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
 * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
 * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
 *
 * Example:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_madvise madvise = {
 *         .vm_id = vm_id,
 *         .start = 0x100000,
 *         .range = 0x2000,
 *         .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
 *         .atomic.val = DRM_XE_ATOMIC_DEVICE,
 *     };
 *
 *     ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
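 *
 * A second minimal sketch, setting the preferred location of the same
 * range to system memory:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_madvise madvise = {
 *         .vm_id = vm_id,
 *         .start = 0x100000,
 *         .range = 0x2000,
 *         .type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
 *         .preferred_mem_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
 *         .preferred_mem_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
 *     };
 *
 *     ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);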
 *
 */
struct drm_xe_madvise {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @start: start of the virtual address range */
	__u64 start;

	/** @range: size of the virtual address range */
	__u64 range;

	/** @vm_id: vm_id of the virtual range */
	__u32 vm_id;

#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
#define DRM_XE_MEM_RANGE_ATTR_PAT 2
	/** @type: type of attribute */
	__u32 type;

	union {
		/**
		 * @preferred_mem_loc: preferred memory location
		 *
		 * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC.
		 *
		 * Supported values for @preferred_mem_loc.devmem_fd:
		 * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of fault tile as preferred loc
		 * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc
		 *
		 * Supported values for @preferred_mem_loc.migration_policy:
		 * - DRM_XE_MIGRATE_ALL_PAGES
		 * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
		 */
		struct {
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
			/** @preferred_mem_loc.devmem_fd: fd for preferred loc */
			__u32 devmem_fd;

#define DRM_XE_MIGRATE_ALL_PAGES 0
#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
			/** @preferred_mem_loc.migration_policy: Page migration policy */
			__u16 migration_policy;

			/** @preferred_mem_loc.pad: MBZ */
			__u16 pad;

			/** @preferred_mem_loc.reserved: Reserved */
			__u64 reserved;
		} preferred_mem_loc;

		/**
		 * @atomic: Atomic access policy
		 *
		 * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
		 *
		 * Supported values for @atomic.val:
		 * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
		 *   Supports both GPU and CPU atomic operations for the system
		 *   allocator, and GPU atomic operations for the normal (BO)
		 *   allocator.
		 * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations.
		 * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations.
		 * - DRM_XE_ATOMIC_CPU: Support CPU atomics only, no GPU atomics supported.
		 */
		struct {
#define DRM_XE_ATOMIC_UNDEFINED 0
#define DRM_XE_ATOMIC_DEVICE 1
#define DRM_XE_ATOMIC_GLOBAL 2
#define DRM_XE_ATOMIC_CPU 3
			/** @atomic.val: value of atomic operation */
			__u32 val;

			/** @atomic.pad: MBZ */
			__u32 pad;

			/** @atomic.reserved: Reserved */
			__u64 reserved;
		} atomic;

		/**
		 * @pat_index: Page attribute table index
		 *
		 * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
		 */
		struct {
			/** @pat_index.val: PAT index value */
			__u32 val;

			/** @pat_index.pad: MBZ */
			__u32 pad;

			/** @pat_index.reserved: Reserved */
			__u64 reserved;
		} pat_index;
	};

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
 *
 * This structure is provided by userspace and filled by the KMD in response
 * to the DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes the memory
 * attributes of a memory range within a user-specified address range in a VM.
 *
 * The structure includes information such as the atomic access policy,
 * page attribute table (PAT) index, and preferred memory location.
 * Userspace allocates an array of these structures and passes a pointer to
 * the ioctl to retrieve the attributes of each memory range.
 *
 * @extensions: Pointer to the first extension struct, if any
 * @start: Start address of the memory range
 * @end: End address of the virtual memory range
 *
 */
struct drm_xe_mem_range_attr {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @start: start of the memory range */
	__u64 start;

	/** @end: end of the memory range */
	__u64 end;

	/** @preferred_mem_loc: preferred memory location */
	struct {
		/** @preferred_mem_loc.devmem_fd: fd for preferred loc */
		__u32 devmem_fd;

		/** @preferred_mem_loc.migration_policy: Page migration policy */
		__u32 migration_policy;
	} preferred_mem_loc;

	/** @atomic: Atomic access policy */
	struct {
		/** @atomic.val: atomic attribute */
		__u32 val;

		/** @atomic.reserved: Reserved */
		__u32 reserved;
	} atomic;

	/** @pat_index: Page attribute table index */
	struct {
		/** @pat_index.val: PAT index */
		__u32 val;

		/** @pat_index.reserved: Reserved */
		__u32 reserved;
	} pat_index;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
 *
 * This structure is used to query the memory attributes of memory regions
 * within a user-specified address range in a VM. It provides detailed
 * information about each memory range, including the atomic access policy,
 * page attribute table (PAT) index, and preferred memory location.
 *
 * Userspace first calls the ioctl with @num_mem_ranges = 0,
 * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
 * the number of memory regions and the size of each memory range attribute.
 * Then, it allocates a buffer of that size and calls the ioctl again to fill
 * the buffer with memory range attributes.
 *
 * If the second call fails with -ENOSPC, the memory ranges changed between
 * the two calls; retry the ioctl with @num_mem_ranges = 0,
 * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL, followed by the
 * second ioctl call.
 *
 * Example:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_vm_query_mem_range_attr query = {
 *         .vm_id = vm_id,
 *         .start = 0x100000,
 *         .range = 0x2000,
 *     };
 *
 *     // First ioctl call to get the number of mem ranges and size of each attribute
 *     ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
 *
 *     // Allocate a buffer for the memory range attributes
 *     void *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
 *     void *ptr_start = ptr;
 *
 *     query.vector_of_mem_attr = (uintptr_t)ptr;
 *
 *     // Second ioctl call to actually fill the memory attributes
 *     ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
 *
 *     // Iterate over the returned memory range attributes
 *     for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
 *         struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
 *
 *         // Do something with attr
 *
 *         // Move the pointer by one entry
 *         ptr += query.sizeof_mem_range_attr;
 *     }
 *
 *     free(ptr_start);
 */
struct drm_xe_vm_query_mem_range_attr {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: vm_id of the virtual range */
	__u32 vm_id;

	/** @num_mem_ranges: number of mem_ranges in range */
	__u32 num_mem_ranges;

	/** @start: start of the virtual address range */
	__u64 start;

	/** @range: size of the virtual address range */
	__u64 range;

	/** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
	__u64 sizeof_mem_range_attr;

	/** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
	__u64 vector_of_mem_attr;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */