GitHub Repository: torvalds/linux
Path: blob/master/include/uapi/drm/drm.h
/*
 * Header for the Direct Rendering Manager
 *
 * Author: Rickard E. (Rik) Faith <[email protected]>
 *
 * Acknowledgments:
 * Dec 1999, Richard Henderson <[email protected]>, move to generic cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_H_
#define _DRM_H_

#if defined(__KERNEL__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#elif defined(__linux__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#else /* One of the BSDs */

#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
typedef uint8_t __u8;
typedef int16_t __s16;
typedef uint16_t __u16;
typedef int32_t __s32;
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;
typedef size_t __kernel_size_t;
typedef unsigned long drm_handle_t;

#endif

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_NAME	"drm"	/**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	/**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	/**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	/**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U	/**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U	/**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;

/*
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/*
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects;
	struct drm_clip_rect *rects;
};

/*
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/*
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer. To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;	/**< lock variable */
	char padding[60];		/**< Pad to cache line */
};

/*
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	__kernel_size_t name_len; /**< Length of name buffer */
	char __user *name;	  /**< Name of driver */
	__kernel_size_t date_len; /**< Length of date buffer */
	char __user *date;	  /**< User-space buffer to hold date */
	__kernel_size_t desc_len; /**< Length of desc buffer */
	char __user *desc;	  /**< User-space buffer to hold desc */
};
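
/*
 * Illustrative sketch, not part of this UAPI header: DRM_IOCTL_VERSION is
 * commonly issued twice -- first with NULL buffers to learn the string
 * lengths, then again with caller-allocated buffers. "fd" is assumed to be
 * an already-open DRM device file descriptor.
 *
 *	struct drm_version v;
 *	memset(&v, 0, sizeof(v));
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// only the *_len fields are filled
 *	v.name = malloc(v.name_len + 1);
 *	v.date = malloc(v.date_len + 1);
 *	v.desc = malloc(v.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// now the buffers are filled
 */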

/*
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	__kernel_size_t unique_len;	/**< Length of unique */
	char __user *unique;		/**< Unique name for driver instantiation */
};

struct drm_list {
	int count;			/**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/*
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/*
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
};

/*
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id;	/**< Context requesting private mapping */
	void *handle;		/**< Handle of map */
};

/*
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/* Private data */
};

/*
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	/* Add to the *END* of the list */
};

/*
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/*
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/*
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};

/*
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/*
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/*
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		/**< Entries in list */
	struct drm_buf_desc __user *list;
};

/*
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/*
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		/**< Index into the master buffer list */
	int total;		/**< Buffer size */
	int used;		/**< Amount of buffer in use (for DMA) */
	void __user *address;	/**< Address of buffer */
};

/*
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#ifdef __cplusplus
	void __user *virt;
#else
	void __user *virtual;	/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list;	/**< Buffer information */
};

/*
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int __user *send_indices;	  /**< List of handles to buffers */
	int __user *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int __user *request_indices;	  /**< Buffer information */
	int __user *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/*
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/*
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/*
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/*
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/*
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};

/*
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	/* bits 1-6 are reserved for high crtcs */
	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
	_DRM_VBLANK_EVENT = 0x4000000,	 /**< Send event instead of blocking */
	_DRM_VBLANK_FLIP = 0x8000000,	 /**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/*
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
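
/*
 * Illustrative sketch, not part of this UAPI header: blocking until the next
 * vertical blank on the first CRTC. "fd" is assumed to be an open DRM device
 * file descriptor; DRM_IOCTL_WAIT_VBLANK is defined further down.
 *
 *	union drm_wait_vblank vbl;
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;		// one vblank from now
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *	// vbl.reply.sequence and vbl.reply.tval_* describe the vblank that occurred
 */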

#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/*
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	__u32 crtc;
	__u32 cmd;
};

/*
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/*
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/*
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/*
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;	/* physical address */
	unsigned long aperture_size;	/* bytes */
	unsigned long memory_allowed;	/* bytes */
	unsigned long memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};

/*
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/*
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};

/**
 * struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl.
 * @handle: Handle of the object to be closed.
 * @pad: Padding.
 *
 * Releases the handle to an mm object.
 */
struct drm_gem_close {
	__u32 handle;
	__u32 pad;
};

/**
 * struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl.
 * @handle: Handle for the object being named.
 * @name: Returned global name.
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
struct drm_gem_flink {
	__u32 handle;
	__u32 name;
};

/**
 * struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl.
 * @name: Name of object being opened.
 * @handle: Returned handle for the object.
 * @size: Returned size of the object
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
struct drm_gem_open {
	__u32 name;
	__u32 handle;
	__u64 size;
};

/**
 * struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl.
 * @handle: The handle of a gem object.
 * @new_handle: An available gem handle.
 *
 * This ioctl changes the handle of a GEM object to the specified one.
 * The new handle must be unused. On success the old handle is closed
 * and all further IOCTL should refer to the new handle only.
 * Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle.
 */
struct drm_gem_change_handle {
	__u32 handle;
	__u32 new_handle;
};

/**
 * DRM_CAP_DUMB_BUFFER
 *
 * If set to 1, the driver supports creating dumb buffers via the
 * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
 */
#define DRM_CAP_DUMB_BUFFER 0x1
/**
 * DRM_CAP_VBLANK_HIGH_CRTC
 *
 * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
 * in the high bits of &drm_wait_vblank_request.type.
 *
 * Starting kernel version 2.6.39, this capability is always set to 1.
 */
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
/**
 * DRM_CAP_DUMB_PREFERRED_DEPTH
 *
 * The preferred bit depth for dumb buffers.
 *
 * The bit depth is the number of bits used to indicate the color of a single
 * pixel excluding any padding. This is different from the number of bits per
 * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
 * pixel.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
/**
 * DRM_CAP_DUMB_PREFER_SHADOW
 *
 * If set to 1, the driver prefers userspace to render to a shadow buffer
 * instead of directly rendering to a dumb buffer. For best speed, userspace
 * should do streaming ordered memory copies into the dumb buffer and never
 * read from it.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
/**
 * DRM_CAP_PRIME
 *
 * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
 * and &DRM_PRIME_CAP_EXPORT.
 *
 * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
 * &DRM_PRIME_CAP_EXPORT are always advertised.
 *
 * PRIME buffers are exposed as dma-buf file descriptors.
 * See :ref:`prime_buffer_sharing`.
 */
#define DRM_CAP_PRIME 0x5
/**
 * DRM_PRIME_CAP_IMPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
 * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
 *
 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 */
#define DRM_PRIME_CAP_IMPORT 0x1
/**
 * DRM_PRIME_CAP_EXPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
 * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
 *
 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 */
#define DRM_PRIME_CAP_EXPORT 0x2
/**
 * DRM_CAP_TIMESTAMP_MONOTONIC
 *
 * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
 * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
 * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
 * clocks.
 *
 * Starting from kernel version 2.6.39, the default value for this capability
 * is 1. Starting kernel version 4.15, this capability is always set to 1.
 */
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
/**
 * DRM_CAP_ASYNC_PAGE_FLIP
 *
 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
 * page-flips.
 */
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
 * DRM_CAP_CURSOR_WIDTH
 *
 * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
 * width x height combination for the hardware cursor. The intention is that a
 * hardware agnostic userspace can query a cursor plane size to use.
 *
 * Note that the cross-driver contract is to merely return a valid size;
 * drivers are free to attach another meaning on top, e.g. i915 returns the
 * maximum plane size.
 */
#define DRM_CAP_CURSOR_WIDTH 0x8
/**
 * DRM_CAP_CURSOR_HEIGHT
 *
 * See &DRM_CAP_CURSOR_WIDTH.
 */
#define DRM_CAP_CURSOR_HEIGHT 0x9
/**
 * DRM_CAP_ADDFB2_MODIFIERS
 *
 * If set to 1, the driver supports supplying modifiers in the
 * &DRM_IOCTL_MODE_ADDFB2 ioctl.
 */
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
/**
 * DRM_CAP_PAGE_FLIP_TARGET
 *
 * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
 * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
 * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
 * ioctl.
 */
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
/**
 * DRM_CAP_CRTC_IN_VBLANK_EVENT
 *
 * If set to 1, the kernel supports reporting the CRTC ID in
 * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
 * &DRM_EVENT_FLIP_COMPLETE events.
 *
 * Starting kernel version 4.12, this capability is always set to 1.
 */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
/**
 * DRM_CAP_SYNCOBJ
 *
 * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
 */
#define DRM_CAP_SYNCOBJ 0x13
/**
 * DRM_CAP_SYNCOBJ_TIMELINE
 *
 * If set to 1, the driver supports timeline operations on sync objects. See
 * :ref:`drm_sync_objects`.
 */
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/**
 * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
 *
 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
 * commits.
 */
#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15

/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;
	__u64 value;
};
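
/*
 * Illustrative sketch, not part of this UAPI header: querying one of the
 * DRM_CAP_* values above via DRM_IOCTL_GET_CAP (defined further down).
 * "fd" is assumed to be an open DRM device file descriptor.
 *
 *	struct drm_get_cap cap;
 *	memset(&cap, 0, sizeof(cap));
 *	cap.capability = DRM_CAP_DUMB_BUFFER;
 *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
 *		use_dumb_buffers();	// placeholder for caller-side logic
 */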

/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 3.13.
 */
#define DRM_CLIENT_CAP_STEREO_3D 1

/**
 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
 *
 * If set to 1, the DRM core will expose all planes (overlay, primary, and
 * cursor) to userspace.
 *
 * This capability has been introduced in kernel version 3.15. Starting from
 * kernel version 3.17, this capability is always supported for all drivers.
 */
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2

/**
 * DRM_CLIENT_CAP_ATOMIC
 *
 * If set to 1, the DRM core will expose atomic properties to userspace. This
 * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
 * &DRM_CLIENT_CAP_ASPECT_RATIO.
 *
 * If the driver doesn't support atomic mode-setting, enabling this capability
 * will fail with -EOPNOTSUPP.
 *
 * This capability has been introduced in kernel version 4.0. Starting from
 * kernel version 4.2, this capability is always supported for atomic-capable
 * drivers.
 */
#define DRM_CLIENT_CAP_ATOMIC 3

/**
 * DRM_CLIENT_CAP_ASPECT_RATIO
 *
 * If set to 1, the DRM core will provide aspect ratio information in modes.
 * See ``DRM_MODE_FLAG_PIC_AR_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 4.18.
 */
#define DRM_CLIENT_CAP_ASPECT_RATIO 4

/**
 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
 *
 * If set to 1, the DRM core will expose special connectors to be used for
 * writing back to memory the scene setup in the commit. The client must enable
 * &DRM_CLIENT_CAP_ATOMIC first.
 *
 * This capability is always supported for atomic-capable drivers starting from
 * kernel version 4.19.
 */
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5

/**
 * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
 *
 * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
 * virtualbox) have additional restrictions for cursor planes (thus
 * making cursor planes on those drivers not truly universal), e.g.
 * they need cursor planes to act like one would expect from a mouse
 * cursor and have correctly set hotspot properties.
 * If this client cap is not set the DRM core will hide cursor plane on
 * those virtualized drivers because not setting it implies that the
 * client is not capable of dealing with those extra restrictions.
 * Clients which do set cursor hotspot and treat the cursor plane
 * like a mouse cursor should set this property.
 * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
 *
 * Setting this property on drivers which do not special case
 * cursor planes (i.e. non-virtualized drivers) will return
 * EOPNOTSUPP, which can be used by userspace to gauge
 * requirements of the hardware/drivers they're running on.
 *
 * This capability is always supported for atomic-capable virtualized
 * drivers starting from kernel version 6.6.
 */
#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6

/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability;
	__u64 value;
};
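
/*
 * Illustrative sketch, not part of this UAPI header: opting in to atomic
 * modesetting via DRM_IOCTL_SET_CLIENT_CAP (defined further down). Per the
 * documentation above, this implicitly enables universal planes and
 * aspect-ratio modes, and fails with EOPNOTSUPP on non-atomic drivers.
 *
 *	struct drm_set_client_cap ccap;
 *	ccap.capability = DRM_CLIENT_CAP_ATOMIC;
 *	ccap.value = 1;
 *	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &ccap) != 0)
 *		fall_back_to_legacy_modesetting();	// placeholder
 */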

#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
	__u32 handle;

	/** Flags; only applicable for handle->fd */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};

struct drm_syncobj_create {
	__u32 handle;
#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
	__u32 flags;
};

struct drm_syncobj_destroy {
	__u32 handle;
	__u32 pad;
};

#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
	__u32 handle;
	__u32 flags;

	__s32 fd;
	__u32 pad;

	__u64 point;
};

struct drm_syncobj_transfer {
	__u32 src_handle;
	__u32 dst_handle;
	__u64 src_point;
	__u64 dst_point;
	__u32 flags;
	__u32 pad;
};

#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
	__u64 handles;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
	/**
	 * @deadline_nsec - fence deadline hint
	 *
	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
	 * set.
	 */
	__u64 deadline_nsec;
};
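
/*
 * Illustrative sketch, not part of this UAPI header: @handles carries a
 * user-space pointer cast to __u64, as is common for DRM ioctl arrays.
 * "syncobj_a", "syncobj_b" and "deadline" are placeholders;
 * DRM_IOCTL_SYNCOBJ_WAIT is defined further down.
 *
 *	__u32 handles[2] = { syncobj_a, syncobj_b };
 *	struct drm_syncobj_wait wait;
 *	memset(&wait, 0, sizeof(wait));
 *	wait.handles = (__u64)(uintptr_t)handles;
 *	wait.count_handles = 2;
 *	wait.timeout_nsec = deadline;		// absolute CLOCK_MONOTONIC time
 *	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */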

struct drm_syncobj_timeline_wait {
	__u64 handles;
	/* wait on a specific timeline point for every handle */
	__u64 points;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
	/**
	 * @deadline_nsec - fence deadline hint
	 *
	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
	 * set.
	 */
	__u64 deadline_nsec;
};

/**
 * struct drm_syncobj_eventfd
 * @handle: syncobj handle.
 * @flags: Zero to wait for the point to be signalled, or
 *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
 *         available for the point.
 * @point: syncobj timeline point (set to zero for binary syncobjs).
 * @fd: Existing eventfd to send events to.
 * @pad: Must be zero.
 *
 * Register an eventfd to be signalled by a syncobj. The eventfd counter will
 * be incremented by one.
 */
struct drm_syncobj_eventfd {
	__u32 handle;
	__u32 flags;
	__u64 point;
	__s32 fd;
	__u32 pad;
};


struct drm_syncobj_array {
	__u64 handles;
	__u32 count_handles;
	__u32 pad;
};

#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
	__u64 handles;
	__u64 points;
	__u32 count_handles;
	__u32 flags;
};


/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
	__u32 crtc_id;		/* requested crtc_id */
	__u32 active;		/* return: crtc output is active */
	__u64 sequence;		/* return: most recent vblank sequence */
	__s64 sequence_ns;	/* return: most recent time of first pixel out */
};

/* Queue event to be delivered at specified sequence. Time stamp marks
 * when the first pixel of the refresh cycle leaves the display engine
 * for the display
 */
#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */

struct drm_crtc_queue_sequence {
	__u32 crtc_id;
	__u32 flags;
	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
	__u64 user_data;	/* user data passed to event */
};

#define DRM_CLIENT_NAME_MAX_LEN		64
struct drm_set_client_name {
	__u64 name_len;
	__u64 name;
};


#if defined(__cplusplus)
}
#endif

#include "drm_mode.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP		DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT		DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS		DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL		DRM_IOW(0x08, struct drm_modeset_ctl)
/**
 * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
 *
 * GEM handles are not reference-counted by the kernel. User-space is
 * responsible for managing their lifetime. For example, if user-space imports
 * the same memory object twice on the same DRM file description, the same GEM
 * handle is returned by both imports, and user-space needs to ensure
 * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
 * when a memory object is allocated, then exported and imported again on the
 * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
 * and always returns fresh new GEM handles even if an existing GEM handle
 * already refers to the same memory object before the IOCTL is performed.
 */
#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
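/*
 * Illustrative sketch, not part of this UAPI header: dropping a GEM handle
 * once user-space is done with it, per the lifetime rules described above.
 * "handle" and "fd" are placeholders.
 *
 *	struct drm_gem_close close_args;
 *	memset(&close_args, 0, sizeof(close_args));
 *	close_args.handle = handle;
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 */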
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER		DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER		DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

/**
 * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
 *
 * User-space sets &drm_prime_handle.handle with the GEM handle to export and
 * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
 * &drm_prime_handle.fd.
 *
 * The export can fail for any driver-specific reason, e.g. because export is
 * not supported for this specific GEM handle (but might be for others).
 *
 * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
 */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD	DRM_IOWR(0x2d, struct drm_prime_handle)
/**
 * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
 *
 * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
 * import, and gets back a GEM handle in &drm_prime_handle.handle.
 * &drm_prime_handle.flags is unused.
 *
 * If an existing GEM handle refers to the memory object backing the DMA-BUF,
 * that GEM handle is returned. Therefore user-space which needs to handle
 * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
 * reference-count duplicated GEM handles. For more information see
 * &DRM_IOCTL_GEM_CLOSE.
 *
 * The import can fail for any driver-specific reason, e.g. because import is
 * only supported for DMA-BUFs allocated on this DRM device.
 *
 * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
 */
#define DRM_IOCTL_PRIME_FD_TO_HANDLE	DRM_IOWR(0x2e, struct drm_prime_handle)
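/*
 * Illustrative sketch, not part of this UAPI header: exporting a GEM handle
 * as a dma-buf fd and importing it again, e.g. on another DRM device.
 * "handle", "fd" and "other_fd" are placeholders.
 *
 *	struct drm_prime_handle prime;
 *	memset(&prime, 0, sizeof(prime));
 *	prime.handle = handle;
 *	prime.flags = DRM_CLOEXEC | DRM_RDWR;
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);	// prime.fd is the dma-buf
 *
 *	prime.flags = 0;					// unused for import
 *	ioctl(other_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime);
 *	// prime.handle is now a GEM handle valid on other_fd
 */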

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
/**
 * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
 *
 * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
 * argument is a framebuffer object ID.
 *
 * Warning: removing a framebuffer currently in-use on an enabled plane will
 * disable that plane. The CRTC the plane is linked to may also be disabled
 * (depending on driver capabilities).
 */
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

/**
 * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
 *
 * KMS dumb buffers provide a very primitive way to allocate a buffer object
 * suitable for scanout and map it for software rendering. KMS dumb buffers are
 * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
 * buffers are not suitable to be displayed on any other device than the KMS
 * device where they were allocated from. Also see
 * :ref:`kms_dumb_buffer_objects`.
 *
 * The IOCTL argument is a struct drm_mode_create_dumb.
 *
 * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
 * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
 * &DRM_IOCTL_MODE_MAP_DUMB.
 *
 * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
 * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
 * driver preferences for dumb buffers.
 */
#define DRM_IOCTL_MODE_CREATE_DUMB	DRM_IOWR(0xB2, struct drm_mode_create_dumb)
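/*
 * Illustrative sketch, not part of this UAPI header, of the dumb-buffer flow
 * described above. struct drm_mode_create_dumb and struct drm_mode_map_dumb
 * come from drm_mode.h; "fd" is an open DRM device file descriptor. The
 * created buffer can additionally be wrapped in a framebuffer via
 * DRM_IOCTL_MODE_ADDFB before being put on a plane.
 *
 *	struct drm_mode_create_dumb create = { .width = 1024, .height = 768, .bpp = 32 };
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	// create.handle, create.pitch and create.size are now filled in
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(0, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */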
#define DRM_IOCTL_MODE_MAP_DUMB		DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB	DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE		DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE		DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC		DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB	DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB	DRM_IOWR(0xBE, struct drm_mode_destroy_blob)

#define DRM_IOCTL_SYNCOBJ_CREATE	DRM_IOWR(0xBF, struct drm_syncobj_create)
#define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
#define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)

#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)

#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)

/**
 * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
 *
 * This queries metadata about a framebuffer. User-space fills
 * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
 * struct as the output.
 *
 * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
 * will be filled with GEM buffer handles. Fresh new GEM handles are always
 * returned, even if another GEM handle referring to the same memory object
 * already exists on the DRM file description. The caller is responsible for
 * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
 * new handle will be returned for multiple planes in case they use the same
 * memory object. Planes are valid until one has a zero handle -- this can be
 * used to compute the number of planes.
 *
 * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
 * until one has a zero &drm_mode_fb_cmd2.pitches.
 *
 * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
 * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
 * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
 *
 * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
 * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
 * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
 * double-close handles which are specified multiple times in the array.
 */
#define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)

#define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)

/**
 * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
 *
 * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
 * argument is a framebuffer object ID.
 *
 * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
 * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
 * alive. When the plane no longer uses the framebuffer (because the
 * framebuffer is replaced with another one, or the plane is disabled), the
 * framebuffer is cleaned up.
 *
 * This is useful to implement flicker-free transitions between two processes.
 *
 * Depending on the threat model, user-space may want to ensure that the
 * framebuffer doesn't expose any sensitive user information: closed
 * framebuffers attached to a plane can be read back by the next DRM master.
 */
#define DRM_IOCTL_MODE_CLOSEFB		DRM_IOWR(0xD0, struct drm_mode_closefb)

/**
 * DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file
 *
 * Having a name allows for easier tracking and debugging.
 * The length of the name (without the terminating NUL character) must be
 * <= DRM_CLIENT_NAME_MAX_LEN.
 * The call will fail if the name contains whitespaces or non-printable chars.
 */
#define DRM_IOCTL_SET_CLIENT_NAME	DRM_IOWR(0xD1, struct drm_set_client_name)

/**
 * DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle
 *
 * Some applications (notably CRIU) need objects to have specific gem handles.
 * This ioctl changes the object at one gem handle to use a new gem handle.
 */
#define DRM_IOCTL_GEM_CHANGE_HANDLE	DRM_IOWR(0xD2, struct drm_gem_change_handle)

/*
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x9f.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE		0x40
#define DRM_COMMAND_END			0xA0

/**
 * struct drm_event - Header for DRM events
 * @type: event type.
 * @length: total number of payload bytes (including header).
 *
 * This struct is a header for events written back to user-space on the DRM FD.
 * A read on the DRM FD will always only return complete events: e.g. if the
 * read buffer is 100 bytes large and there are two 64 byte events pending,
 * only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
 * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
 * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
 */
struct drm_event {
	__u32 type;
	__u32 length;
};
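
/*
 * Illustrative sketch, not part of this UAPI header: draining events from the
 * DRM fd. Each read() returns only complete events, so they can be walked by
 * header. "fd" and handle_flip() are placeholders; DRM_EVENT_FLIP_COMPLETE
 * and struct drm_event_vblank are defined below.
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	for (char *p = buf; p < buf + len; ) {
 *		struct drm_event *e = (struct drm_event *)p;
 *		if (e->type == DRM_EVENT_FLIP_COMPLETE)
 *			handle_flip((struct drm_event_vblank *)e);
 *		p += e->length;
 *	}
 */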

/**
 * DRM_EVENT_VBLANK - vertical blanking event
 *
 * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
 * &_DRM_VBLANK_EVENT flag set.
 *
 * The event payload is a struct drm_event_vblank.
 */
#define DRM_EVENT_VBLANK 0x01
/**
 * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
 *
 * This event is sent in response to an atomic commit or legacy page-flip with
 * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
 *
 * The event payload is a struct drm_event_vblank.
 */
#define DRM_EVENT_FLIP_COMPLETE 0x02
/**
 * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
 *
 * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
 *
 * The event payload is a struct drm_event_crtc_sequence.
 */
#define DRM_EVENT_CRTC_SEQUENCE	0x03

struct drm_event_vblank {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
	__u32 sequence;
	__u32 crtc_id; /* 0 on older kernels that do not support this */
};

/* Event delivered at sequence. Time stamp marks when the first pixel
 * of the refresh cycle leaves the display engine for the display
 */
struct drm_event_crtc_sequence {
	struct drm_event base;
	__u64 user_data;
	__s64 time_ns;
	__u64 sequence;
};

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif

#if defined(__cplusplus)
}
#endif

#endif