Diffstat (limited to 'src')
-rw-r--r--   src/config_kernel.h     43
-rw-r--r--   src/drm.h             1408
-rw-r--r--   src/drm_mode.h        1360
-rw-r--r--   src/drvemu.c           530
-rw-r--r--   src/pvr_ioctl.h         20
-rw-r--r--   src/pvr_ioctl.inc      251
-rw-r--r--   src/xf86drm.h          983
7 files changed, 4595 insertions, 0 deletions
diff --git a/src/config_kernel.h b/src/config_kernel.h
new file mode 100644
index 0000000..88b7dba
--- /dev/null
+++ b/src/config_kernel.h
@@ -0,0 +1,43 @@
+#define LINUX
+#define PVR_BUILD_DIR "jz4780_linux"
+#define PVR_BUILD_TYPE "release"
+#define PVRSRV_MODNAME "pvrsrvkm"
+#define SGXCORE 540
+#define SGX540
+#define SUPPORT_SGX540
+#define SUPPORT_SGX
+#define SGX_CORE_REV 130
+#define TRANSFER_QUEUE
+#define PVR_SECURE_HANDLES
+#define SUPPORT_LISR_MISR_SYNC
+#define DISPLAY_CONTROLLER dc_xbfb3_linux
+#define PVR_LINUX_MEM_AREA_POOL_MAX_PAGES 0
+#define RELEASE
+#define SUPPORT_PERCONTEXT_PB
+#define SUPPORT_HW_RECOVERY
+#define SUPPORT_ACTIVE_POWER_MANAGEMENT
+#define SUPPORT_SGX_HWPERF
+#define SUPPORT_SGX_LOW_LATENCY_SCHEDULING
+#define SUPPORT_SGX_NEW_STATUS_VALS
+#define SUPPORT_DBGDRV_EVENT_OBJECTS
+#define SGX_FEATURE_SYSTEM_CACHE
+#define SGX_FAST_DPM_INIT
+#define SYS_USING_INTERRUPTS
+#define SUPPORT_PVRSRV_DEVICE_CLASS
+#define SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER
+#define IMG_ADDRSPACE_PHYSADDR_BITS 32
+#define SUPPORT_LINUX_X86_WRITECOMBINE
+#define SUPPORT_LINUX_X86_PAT
+#define SGX_DYNAMIC_TIMING_INFO
+#define SYS_CUSTOM_POWERLOCK_WRAP
+#define PVR_LINUX_USING_WORKQUEUES
+#define PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE
+#define PVR_LINUX_TIMERS_USING_WORKQUEUES
+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm"
+#define XPROC_WORKAROUND_NUM_SHAREABLES 4095
+#define SUPPORT_DRI_DRM
+#define PVR_SECURE_DRM_AUTH_EXPORT
+#define PVR_DRI_DRM_NOT_PCI
+#define DC_NOHW_BUFFER_WIDTH 1920
+#define DC_NOHW_BUFFER_HEIGHT 1080
+#define SUPPORT_DMABUF
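
(Aside, not part of the commit: config_kernel.h reads as a generated build-configuration header that the other sources key off at compile time. A minimal, purely hypothetical consumer is sketched below — the main() wrapper and printf strings are illustrative only; the macros are the ones defined above.)

/* Hypothetical consumer of config_kernel.h -- illustrative, not part of this commit. */
#include <stdio.h>
#include "config_kernel.h"

int main(void)
{
#if defined(SUPPORT_SGX540) && (SGX_CORE_REV == 130)
	/* This build targets an SGX540 core at revision 130. */
	printf("module %s, build dir %s (%s)\n",
	       PVRSRV_MODNAME, PVR_BUILD_DIR, PVR_BUILD_TYPE);
#endif
#ifdef SUPPORT_DMABUF
	printf("dma-buf support compiled in\n");
#endif
	/* Buffer dimensions taken from the DC_NOHW_* defines above. */
	printf("dc_nohw buffer: %dx%d\n",
	       DC_NOHW_BUFFER_WIDTH, DC_NOHW_BUFFER_HEIGHT);
	return 0;
}
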
diff --git a/src/drm.h b/src/drm.h
new file mode 100644
index 0000000..4e4f7c2
--- /dev/null
+++ b/src/drm.h
@@ -0,0 +1,1408 @@
+/*
+ * Header for the Direct Rendering Manager
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#if defined(__linux__)
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#else /* One of the BSDs */
+
+#include <stdint.h>
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+typedef size_t __kernel_size_t;
+typedef unsigned long drm_handle_t;
+
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+/*
+ * Cliprect.
+ *
+ * \warning: If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+/*
+ * Drawable information.
+ */
+struct drm_drawable_info {
+ unsigned int num_rects;
+ struct drm_clip_rect *rects;
+};
+
+/*
+ * Texture region.
+ */
+struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+};
+
+/*
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+};
+
+/*
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ __kernel_size_t name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ __kernel_size_t date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ __kernel_size_t desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+};
+
+/*
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+ __kernel_size_t unique_len; /**< Length of unique */
+ char *unique; /**< Unique name for driver instantiation */
+};
+
+struct drm_list {
+ int count; /**< Length of user-space structures */
+ struct drm_version *version;
+};
+
+struct drm_block {
+ int unused;
+};
+
+/*
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+};
+
+/*
+ * Type of memory to map.
+ */
+enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
+};
+
+/*
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+};
+
+struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+};
+
+/*
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+ unsigned long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+};
+
+/*
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+};
+
+enum drm_stat_type {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+ _DRM_STAT_BYTE, /**< Generic byte counter (1024 bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+};
+
+/*
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ enum drm_stat_type type;
+ } data[15];
+};
+
+/*
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+};
+
+/*
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+ int context;
+ enum drm_lock_flags flags;
+};
+
+/*
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+};
+
+/*
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+ } flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+};
+
+/*
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+ int count; /**< Entries in list */
+ struct drm_buf_desc *list;
+};
+
+/*
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+ int count;
+ int *list;
+};
+
+/*
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void *address; /**< Address of buffer */
+};
+
+/*
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual; /**< Mmap'd area in user-virtual */
+#endif
+ struct drm_buf_pub *list; /**< Buffer information */
+};
+
+/*
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_indices; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send */
+ enum drm_dma_flags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int *request_indices; /**< Buffer information */
+ int *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/*
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+ drm_context_t handle;
+ enum drm_ctx_flags flags;
+};
+
+/*
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+ int count;
+ struct drm_ctx *contexts;
+};
+
+/*
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+ drm_drawable_t handle;
+};
+
+/*
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+/*
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+ drm_magic_t magic;
+};
+
+/*
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ /* bits 1-6 are reserved for high crtcs */
+ _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
+};
+#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+};
+
+/*
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+};
+
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/*
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+ __u32 crtc;
+ __u32 cmd;
+};
+
+/*
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+/*
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+};
+
+/*
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+};
+
+/*
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+};
+
+/*
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+};
+
+/*
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ __u32 handle;
+ __u32 pad;
+};
+
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ __u32 handle;
+
+ /** Returned global name */
+ __u32 name;
+};
+
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+ /** Name of object being opened */
+ __u32 name;
+
+ /** Returned handle for the object */
+ __u32 handle;
+
+ /** Returned size of the object */
+ __u64 size;
+};
+
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
+#define DRM_CAP_DUMB_BUFFER 0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
+ * in the high bits of &drm_wait_vblank_request.type.
+ *
+ * Starting kernel version 2.6.39, this capability is always set to 1.
+ */
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
+ */
+#define DRM_CAP_PRIME 0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
+ */
+#define DRM_PRIME_CAP_IMPORT 0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
+ */
+#define DRM_PRIME_CAP_EXPORT 0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting kernel version 4.15, this capability is always set to 1.
+ */
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
+ */
+#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
+ *
+ * Note that the cross-driver contract is to merely return a valid size;
+ * drivers are free to attach another meaning on top, eg. i915 returns the
+ * maximum plane size.
+ */
+#define DRM_CAP_CURSOR_WIDTH 0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
+#define DRM_CAP_CURSOR_HEIGHT 0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
+#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting kernel version 4.12, this capability is always set to 1.
+ */
+#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
+ */
+#define DRM_CAP_SYNCOBJ 0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * :ref:`drm_sync_objects`.
+ */
+#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
+
+/* DRM_IOCTL_GET_CAP ioctl argument type */
+struct drm_get_cap {
+ __u64 capability;
+ __u64 value;
+};
+
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D 1
+
+/**
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES
+ *
+ * If set to 1, the DRM core will expose all planes (overlay, primary, and
+ * cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
+ */
+#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set the DRM core will hide the cursor plane on
+ * those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+ __u64 capability;
+ __u64 value;
+};
+
+#define DRM_RDWR O_RDWR
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+ __u32 handle;
+
+ /** Flags; only applicable for handle->fd */
+ __u32 flags;
+
+ /** Returned dmabuf file descriptor */
+ __s32 fd;
+};
+
+struct drm_syncobj_create {
+ __u32 handle;
+#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
+ __u32 flags;
+};
+
+struct drm_syncobj_destroy {
+ __u32 handle;
+ __u32 pad;
+};
+
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
+struct drm_syncobj_handle {
+ __u32 handle;
+ __u32 flags;
+
+ __s32 fd;
+ __u32 pad;
+};
+
+struct drm_syncobj_transfer {
+ __u32 src_handle;
+ __u32 dst_handle;
+ __u64 src_point;
+ __u64 dst_point;
+ __u32 flags;
+ __u32 pad;
+};
+
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
+struct drm_syncobj_wait {
+ __u64 handles;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
+};
+
+struct drm_syncobj_timeline_wait {
+ __u64 handles;
+ /* wait on a specific timeline point for each handle */
+ __u64 points;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
+};
+
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+ __s32 fd;
+ __u32 pad;
+};
+
+
+struct drm_syncobj_array {
+ __u64 handles;
+ __u32 count_handles;
+ __u32 pad;
+};
+
+#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
+struct drm_syncobj_timeline_array {
+ __u64 handles;
+ __u64 points;
+ __u32 count_handles;
+ __u32 flags;
+};
+
+
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+ __u32 crtc_id; /* requested crtc_id */
+ __u32 active; /* return: crtc output is active */
+ __u64 sequence; /* return: most recent vblank sequence */
+ __s64 sequence_ns; /* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+ __u32 crtc_id;
+ __u32 flags;
+ __u64 sequence; /* on input, target sequence. on output, actual sequence */
+ __u64 user_data; /* user data passed to event */
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#include "drm_mode.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+/**
+ * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
+ *
+ * GEM handles are not reference-counted by the kernel. User-space is
+ * responsible for managing their lifetime. For example, if user-space imports
+ * the same memory object twice on the same DRM file description, the same GEM
+ * handle is returned by both imports, and user-space needs to ensure
+ * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
+ * when a memory object is allocated, then exported and imported again on the
+ * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
+ * and always returns fresh new GEM handles even if an existing GEM handle
+ * already refers to the same memory object before the IOCTL is performed.
+ */
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
+#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+
+/**
+ * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
+ *
+ * User-space sets &drm_prime_handle.handle with the GEM handle to export and
+ * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
+ * &drm_prime_handle.fd.
+ *
+ * The export can fail for any driver-specific reason, e.g. because export is
+ * not supported for this specific GEM handle (but might be for others).
+ *
+ * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
+ */
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+/**
+ * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
+ *
+ * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
+ * import, and gets back a GEM handle in &drm_prime_handle.handle.
+ * &drm_prime_handle.flags is unused.
+ *
+ * If an existing GEM handle refers to the memory object backing the DMA-BUF,
+ * that GEM handle is returned. Therefore user-space which needs to handle
+ * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
+ * reference-count duplicated GEM handles. For more information see
+ * &DRM_IOCTL_GEM_CLOSE.
+ *
+ * The import can fail for any driver-specific reason, e.g. because import is
+ * only supported for DMA-BUFs allocated on this DRM device.
+ *
+ * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
+ */
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
+#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
+#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
+
+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
+#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
+#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+/**
+ * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
+ *
+ * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * Warning: removing a framebuffer currently in-use on an enabled plane will
+ * disable that plane. The CRTC the plane is linked to may also be disabled
+ * (depending on driver capabilities).
+ */
+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+
+/**
+ * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
+ *
+ * KMS dumb buffers provide a very primitive way to allocate a buffer object
+ * suitable for scanout and map it for software rendering. KMS dumb buffers are
+ * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
+ * buffers are not suitable to be displayed on any other device than the KMS
+ * device where they were allocated from. Also see
+ * :ref:`kms_dumb_buffer_objects`.
+ *
+ * The IOCTL argument is a struct drm_mode_create_dumb.
+ *
+ * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
+ * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
+ * &DRM_IOCTL_MODE_MAP_DUMB.
+ *
+ * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
+ * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
+ * driver preferences for dumb buffers.
+ */
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
+#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
+#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
+#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+
+#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create)
+#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
+#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
+#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
+#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
+#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
+#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
+
+#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+
+/**
+ * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
+ *
+ * This queries metadata about a framebuffer. User-space fills
+ * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
+ * struct as the output.
+ *
+ * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
+ * will be filled with GEM buffer handles. Fresh new GEM handles are always
+ * returned, even if another GEM handle referring to the same memory object
+ * already exists on the DRM file description. The caller is responsible for
+ * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
+ * new handle will be returned for multiple planes in case they use the same
+ * memory object. Planes are valid until one has a zero handle -- this can be
+ * used to compute the number of planes.
+ *
+ * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
+ * until one has a zero &drm_mode_fb_cmd2.pitches.
+ *
+ * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
+ * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
+ * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ *
+ * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
+ * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
+ * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
+ * double-close handles which are specified multiple times in the array.
+ */
+#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
+#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
+
+/*
+ * Device specific ioctls should only be in their respective headers
+ * The device specific ioctl range is from 0x40 to 0x9f.
+ * Generic IOCTLS restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
+
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
+ *
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
+ */
+struct drm_event {
+ __u32 type;
+ __u32 length;
+};
+
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
+#define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
+#define DRM_EVENT_CRTC_SEQUENCE 0x03
+
+struct drm_event_vblank {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 sequence;
+ __u32 crtc_id; /* 0 on older kernels that do not support this */
+};
+
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+ struct drm_event base;
+ __u64 user_data;
+ __s64 time_ns;
+ __u64 sequence;
+};
+
+/* typedef area */
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
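
(Aside, not part of the commit: the structures and DRM_IOCTL_* macros above form a complete user-space ABI, so a short sketch of how they are typically driven may help. The /dev/dri/card0 path and the error handling below are assumptions; everything else comes straight from this header.)

/* Illustrative only -- not part of this commit. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm.h"

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* DRM_IOCTL_VERSION is a two-pass call: the first pass reports the
	 * string lengths, the second fills caller-provided buffers. */
	struct drm_version ver;
	memset(&ver, 0, sizeof(ver));
	if (ioctl(fd, DRM_IOCTL_VERSION, &ver) == 0) {
		ver.name = calloc(ver.name_len + 1, 1);
		ver.date = calloc(ver.date_len + 1, 1);
		ver.desc = calloc(ver.desc_len + 1, 1);
		if (ver.name && ver.date && ver.desc &&
		    ioctl(fd, DRM_IOCTL_VERSION, &ver) == 0)
			printf("driver %s %d.%d.%d\n", ver.name,
			       ver.version_major, ver.version_minor,
			       ver.version_patchlevel);
	}

	/* DRM_IOCTL_GET_CAP: capability in, value out. */
	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER, .value = 0 };
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
		printf("dumb buffers: %s\n", cap.value ? "supported" : "not supported");

	close(fd);
	return 0;
}
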
diff --git a/src/drm_mode.h b/src/drm_mode.h
new file mode 100644
index 0000000..d390011
--- /dev/null
+++ b/src/drm_mode.h
@@ -0,0 +1,1360 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_MODE_H
+#define _DRM_MODE_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: overview
+ *
+ * DRM exposes many UAPI and structure definitions to have a consistent
+ * and standardized interface with users.
+ * Userspace can refer to these structure definitions and UAPI formats
+ * to communicate to drivers.
+ */
+
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+#define DRM_MODE_TYPE_ALL (DRM_MODE_TYPE_PREFERRED | \
+ DRM_MODE_TYPE_USERDEF | \
+ DRM_MODE_TYPE_DRIVER)
+
+/* Video mode flags */
+/* bit compatible with the xrandr RR_ definitions (bits 0-13)
+ *
+ * ABI warning: Existing userspace really expects
+ * the mode flags to match the xrandr definitions. Any
+ * changes that don't match the xrandr definitions will
+ * likely need a new client cap or some other mechanism
+ * to avoid breaking existing userspace. This includes
+ * allocating new flags in the previously unused bits!
+ */
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */
+#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+ /*
+ * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
+ * (define not exposed to user space).
+ */
+#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
+#define DRM_MODE_FLAG_3D_NONE (0<<14)
+#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
+#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
+#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
+#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+#define DRM_MODE_PICTURE_ASPECT_64_27 3
+#define DRM_MODE_PICTURE_ASPECT_256_135 4
+
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA 0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1
+#define DRM_MODE_CONTENT_TYPE_PHOTO 2
+#define DRM_MODE_CONTENT_TYPE_CINEMA 3
+#define DRM_MODE_CONTENT_TYPE_GAME 4
+
+/* Aspect ratio flag bitmask (4 bits 22:19) */
+#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
+#define DRM_MODE_FLAG_PIC_AR_NONE \
+ (DRM_MODE_PICTURE_ASPECT_NONE<<19)
+#define DRM_MODE_FLAG_PIC_AR_4_3 \
+ (DRM_MODE_PICTURE_ASPECT_4_3<<19)
+#define DRM_MODE_FLAG_PIC_AR_16_9 \
+ (DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define DRM_MODE_FLAG_PIC_AR_64_27 \
+ (DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define DRM_MODE_FLAG_PIC_AR_256_135 \
+ (DRM_MODE_PICTURE_ASPECT_256_135<<19)
+
+#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \
+ DRM_MODE_FLAG_NHSYNC | \
+ DRM_MODE_FLAG_PVSYNC | \
+ DRM_MODE_FLAG_NVSYNC | \
+ DRM_MODE_FLAG_INTERLACE | \
+ DRM_MODE_FLAG_DBLSCAN | \
+ DRM_MODE_FLAG_CSYNC | \
+ DRM_MODE_FLAG_PCSYNC | \
+ DRM_MODE_FLAG_NCSYNC | \
+ DRM_MODE_FLAG_HSKEW | \
+ DRM_MODE_FLAG_DBLCLK | \
+ DRM_MODE_FLAG_CLKDIV2 | \
+ DRM_MODE_FLAG_3D_MASK)
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON 0
+#define DRM_MODE_DPMS_STANDBY 1
+#define DRM_MODE_DPMS_SUSPEND 2
+#define DRM_MODE_DPMS_OFF 3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
+ software can still scale) */
+#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
+#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
+#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON 1
+#define DRM_MODE_DITHERING_AUTO 2
+
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
+/* Link Status options */
+#define DRM_MODE_LINK_STATUS_GOOD 0
+#define DRM_MODE_LINK_STATUS_BAD 1
+
+/*
+ * DRM_MODE_ROTATE_<degrees>
+ *
+ * Signals that a drm plane has been rotated <degrees> degrees in the
+ * counter-clockwise direction.
+ *
+ * This define is provided as a convenience; looking up the property id
+ * via the name->prop id lookup is the preferred method.
+ */
+#define DRM_MODE_ROTATE_0 (1<<0)
+#define DRM_MODE_ROTATE_90 (1<<1)
+#define DRM_MODE_ROTATE_180 (1<<2)
+#define DRM_MODE_ROTATE_270 (1<<3)
+
+/*
+ * DRM_MODE_ROTATE_MASK
+ *
+ * Bitmask used to look for drm plane rotations.
+ */
+#define DRM_MODE_ROTATE_MASK (\
+ DRM_MODE_ROTATE_0 | \
+ DRM_MODE_ROTATE_90 | \
+ DRM_MODE_ROTATE_180 | \
+ DRM_MODE_ROTATE_270)
+
+/*
+ * DRM_MODE_REFLECT_<axis>
+ *
+ * Signals that the contents of a drm plane are reflected along the <axis> axis,
+ * in the same way as mirroring.
+ * See kerneldoc chapter "Plane Composition Properties" for more details.
+ *
+ * This define is provided as a convenience; looking up the property id
+ * via the name->prop id lookup is the preferred method.
+ */
+#define DRM_MODE_REFLECT_X (1<<4)
+#define DRM_MODE_REFLECT_Y (1<<5)
+
+/*
+ * DRM_MODE_REFLECT_MASK
+ *
+ * Bitmask used to look for drm plane reflections.
+ */
+#define DRM_MODE_REFLECT_MASK (\
+ DRM_MODE_REFLECT_X | \
+ DRM_MODE_REFLECT_Y)
+
+/* Content Protection Flags */
+#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
+#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
+#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
+
+/**
+ * struct drm_mode_modeinfo - Display mode information.
+ * @clock: pixel clock in kHz
+ * @hdisplay: horizontal display size
+ * @hsync_start: horizontal sync start
+ * @hsync_end: horizontal sync end
+ * @htotal: horizontal total size
+ * @hskew: horizontal skew
+ * @vdisplay: vertical display size
+ * @vsync_start: vertical sync start
+ * @vsync_end: vertical sync end
+ * @vtotal: vertical total size
+ * @vscan: vertical scan
+ * @vrefresh: approximate vertical refresh rate in Hz
+ * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
+ * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
+ * @name: string describing the mode resolution
+ *
+ * This is the user-space API display mode information structure. For the
+ * kernel version see struct drm_display_mode.
+ */
+struct drm_mode_modeinfo {
+ __u32 clock;
+ __u16 hdisplay;
+ __u16 hsync_start;
+ __u16 hsync_end;
+ __u16 htotal;
+ __u16 hskew;
+ __u16 vdisplay;
+ __u16 vsync_start;
+ __u16 vsync_end;
+ __u16 vtotal;
+ __u16 vscan;
+
+ __u32 vrefresh;
+
+ __u32 flags;
+ __u32 type;
+ char name[DRM_DISPLAY_MODE_LEN];
+};
+
+struct drm_mode_card_res {
+ __u64 fb_id_ptr;
+ __u64 crtc_id_ptr;
+ __u64 connector_id_ptr;
+ __u64 encoder_id_ptr;
+ __u32 count_fbs;
+ __u32 count_crtcs;
+ __u32 count_connectors;
+ __u32 count_encoders;
+ __u32 min_width;
+ __u32 max_width;
+ __u32 min_height;
+ __u32 max_height;
+};
+
+struct drm_mode_crtc {
+ __u64 set_connectors_ptr;
+ __u32 count_connectors;
+
+ __u32 crtc_id; /**< Id */
+ __u32 fb_id; /**< Id of framebuffer */
+
+ __u32 x; /**< x Position on the framebuffer */
+ __u32 y; /**< y Position on the framebuffer */
+
+ __u32 gamma_size;
+ __u32 mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+ __u32 plane_id;
+ __u32 crtc_id;
+ __u32 fb_id; /* fb object contains surface format type */
+ __u32 flags; /* see above flags */
+
+ /* Signed dest location allows it to be partially off screen */
+ __s32 crtc_x;
+ __s32 crtc_y;
+ __u32 crtc_w;
+ __u32 crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ __u32 src_x;
+ __u32 src_y;
+ __u32 src_h;
+ __u32 src_w;
+};
+
+/**
+ * struct drm_mode_get_plane - Get plane metadata.
+ *
+ * Userspace can perform a GETPLANE ioctl to retrieve information about a
+ * plane.
+ *
+ * To retrieve the number of formats supported, set @count_format_types to zero
+ * and call the ioctl. @count_format_types will be updated with the value.
+ *
+ * To retrieve these formats, allocate an array with the memory needed to store
+ * @count_format_types formats. Point @format_type_ptr to this array and call
+ * the ioctl again (with @count_format_types still set to the value returned in
+ * the first ioctl call).
+ */
+struct drm_mode_get_plane {
+ /**
+ * @plane_id: Object ID of the plane whose information should be
+ * retrieved. Set by caller.
+ */
+ __u32 plane_id;
+
+ /** @crtc_id: Object ID of the current CRTC. */
+ __u32 crtc_id;
+ /** @fb_id: Object ID of the current fb. */
+ __u32 fb_id;
+
+ /**
+ * @possible_crtcs: Bitmask of CRTC's compatible with the plane. CRTC's
+ * are created and they receive an index, which corresponds to their
+ * position in the bitmask. Bit N corresponds to
+ * :ref:`CRTC index<crtc_index>` N.
+ */
+ __u32 possible_crtcs;
+ /** @gamma_size: Never used. */
+ __u32 gamma_size;
+
+ /** @count_format_types: Number of formats. */
+ __u32 count_format_types;
+ /**
+ * @format_type_ptr: Pointer to ``__u32`` array of formats that are
+ * supported by the plane. These formats do not require modifiers.
+ */
+ __u64 format_type_ptr;
+};
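+
+/*
+ * Example (illustration only, not part of the upstream header): a minimal
+ * sketch of the two-call GETPLANE pattern described above, assuming an
+ * open DRM fd and the drmIoctl() wrapper from xf86drm.h; allocation and
+ * error handling are omitted.
+ */
+#if 0
+static int example_get_plane_formats(int fd, uint32_t plane_id,
+                                     uint32_t **formats, uint32_t *count)
+{
+        struct drm_mode_get_plane get = { .plane_id = plane_id };
+
+        /* First call: count_format_types is zero, the kernel fills it in. */
+        if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &get))
+                return -1;
+
+        *count = get.count_format_types;
+        *formats = calloc(*count, sizeof(uint32_t));
+        get.format_type_ptr = (uintptr_t)*formats;
+
+        /* Second call: same count, the kernel fills the array. */
+        return drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &get);
+}
+#endif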
+
+struct drm_mode_get_plane_res {
+ __u64 plane_id_ptr;
+ __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI 6
+#define DRM_MODE_ENCODER_DPMST 7
+#define DRM_MODE_ENCODER_DPI 8
+
+struct drm_mode_get_encoder {
+ __u32 encoder_id;
+ __u32 encoder_type;
+
+ __u32 crtc_id; /**< Id of crtc */
+
+ __u32 possible_crtcs;
+ __u32 possible_clones;
+};
+
+/* This is for connectors with multiple signal types. */
+/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
+enum drm_mode_subconnector {
+ DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */
+ DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */
+ DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */
+ DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */
+ DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */
+ DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */
+ DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */
+ DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */
+ DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */
+ DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */
+ DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */
+ DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */
+ DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */
+};
+
+#define DRM_MODE_CONNECTOR_Unknown 0
+#define DRM_MODE_CONNECTOR_VGA 1
+#define DRM_MODE_CONNECTOR_DVII 2
+#define DRM_MODE_CONNECTOR_DVID 3
+#define DRM_MODE_CONNECTOR_DVIA 4
+#define DRM_MODE_CONNECTOR_Composite 5
+#define DRM_MODE_CONNECTOR_SVIDEO 6
+#define DRM_MODE_CONNECTOR_LVDS 7
+#define DRM_MODE_CONNECTOR_Component 8
+#define DRM_MODE_CONNECTOR_9PinDIN 9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA 11
+#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
+#define DRM_MODE_CONNECTOR_VIRTUAL 15
+#define DRM_MODE_CONNECTOR_DSI 16
+#define DRM_MODE_CONNECTOR_DPI 17
+#define DRM_MODE_CONNECTOR_WRITEBACK 18
+#define DRM_MODE_CONNECTOR_SPI 19
+#define DRM_MODE_CONNECTOR_USB 20
+
+/**
+ * struct drm_mode_get_connector - Get connector metadata.
+ *
+ * User-space can perform a GETCONNECTOR ioctl to retrieve information about a
+ * connector. User-space is expected to retrieve encoders, modes and properties
+ * by performing this ioctl at least twice: the first time to retrieve the
+ * number of elements, the second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_props and @count_encoders to
+ * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
+ * drm_mode_modeinfo element.
+ *
+ * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
+ * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
+ * @count_encoders to their capacity.
+ *
+ * Performing the ioctl only twice may be racy: the number of elements may have
+ * changed with a hotplug event in-between the two ioctls. User-space is
+ * expected to retry the last ioctl until the number of elements stabilizes.
+ * The kernel won't fill any array which doesn't have the expected length.
+ *
+ * **Force-probing a connector**
+ *
+ * If the @count_modes field is set to zero and the DRM client is the current
+ * DRM master, the kernel will perform a forced probe on the connector to
+ * refresh the connector status, modes and EDID. A forced-probe can be slow,
+ * might cause flickering and the ioctl will block.
+ *
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced-probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced-probe in other situations.
+ */
+struct drm_mode_get_connector {
+ /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
+ __u64 encoders_ptr;
+ /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
+ __u64 modes_ptr;
+ /** @props_ptr: Pointer to ``__u32`` array of property IDs. */
+ __u64 props_ptr;
+ /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
+ __u64 prop_values_ptr;
+
+ /** @count_modes: Number of modes. */
+ __u32 count_modes;
+ /** @count_props: Number of properties. */
+ __u32 count_props;
+ /** @count_encoders: Number of encoders. */
+ __u32 count_encoders;
+
+ /** @encoder_id: Object ID of the current encoder. */
+ __u32 encoder_id;
+ /** @connector_id: Object ID of the connector. */
+ __u32 connector_id;
+ /**
+ * @connector_type: Type of the connector.
+ *
+ * See DRM_MODE_CONNECTOR_* defines.
+ */
+ __u32 connector_type;
+ /**
+ * @connector_type_id: Type-specific connector number.
+ *
+ * This is not an object ID. This is a per-type connector number. Each
+ * (type, type_id) combination is unique across all connectors of a DRM
+ * device.
+ *
+ * The (type, type_id) combination is not a stable identifier: the
+ * type_id can change depending on the driver probe order.
+ */
+ __u32 connector_type_id;
+
+ /**
+ * @connection: Status of the connector.
+ *
+ * See enum drm_connector_status.
+ */
+ __u32 connection;
+ /** @mm_width: Width of the connected sink in millimeters. */
+ __u32 mm_width;
+ /** @mm_height: Height of the connected sink in millimeters. */
+ __u32 mm_height;
+ /**
+ * @subpixel: Subpixel order of the connected sink.
+ *
+ * See enum subpixel_order.
+ */
+ __u32 subpixel;
+
+ /** @pad: Padding, must be zero. */
+ __u32 pad;
+};
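+
+/*
+ * Example (illustration only, not part of the upstream header): fetching a
+ * connector's mode list with the count/fetch pattern described above,
+ * assuming drmIoctl() from xf86drm.h. A real client would also fetch the
+ * encoders and properties and retry if a hotplug changes the counts
+ * between the two calls.
+ */
+#if 0
+static int example_get_connector_modes(int fd, uint32_t connector_id,
+                                       struct drm_mode_modeinfo **modes,
+                                       uint32_t *count)
+{
+        struct drm_mode_get_connector conn;
+        struct drm_mode_modeinfo probe;
+
+        memset(&conn, 0, sizeof(conn));
+        conn.connector_id = connector_id;
+        /* count_modes = 1 with a scratch buffer avoids a slow force-probe. */
+        conn.count_modes = 1;
+        conn.modes_ptr = (uintptr_t)&probe;
+
+        if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
+                return -1;
+
+        *count = conn.count_modes;
+        *modes = calloc(*count, sizeof(**modes));
+        conn.modes_ptr = (uintptr_t)*modes;
+        conn.count_encoders = 0;
+        conn.count_props = 0;
+
+        return drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
+}
+#endif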
+
+#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
+
+/* non-extended types: legacy bitmask, one bit per type: */
+#define DRM_MODE_PROP_LEGACY_TYPE ( \
+ DRM_MODE_PROP_RANGE | \
+ DRM_MODE_PROP_ENUM | \
+ DRM_MODE_PROP_BLOB | \
+ DRM_MODE_PROP_BITMASK)
+
+/* extended-types: rather than continue to consume a bit per type,
+ * grab a chunk of the bits to use as integer type id.
+ */
+#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
+#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
+#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
+#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+
+/* the PROP_ATOMIC flag is used to hide properties from userspace that
+ * is not aware of atomic properties. This is mostly to work around
+ * older userspace (DDX drivers) that read/write each prop they find,
+ * without being aware that this could be triggering a lengthy modeset.
+ */
+#define DRM_MODE_PROP_ATOMIC 0x80000000
+
+/**
+ * struct drm_mode_property_enum - Description for an enum/bitfield entry.
+ * @value: numeric value for this enum entry.
+ * @name: symbolic name for this enum entry.
+ *
+ * See struct drm_property_enum for details.
+ */
+struct drm_mode_property_enum {
+ __u64 value;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+/**
+ * struct drm_mode_get_property - Get property metadata.
+ *
+ * User-space can perform a GETPROPERTY ioctl to retrieve information about a
+ * property. The same property may be attached to multiple objects, see
+ * "Modeset Base Object Abstraction".
+ *
+ * The meaning of the @values_ptr field changes depending on the property type.
+ * See &drm_property.flags for more details.
+ *
+ * The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the
+ * property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For
+ * backwards compatibility, the kernel will always set @count_enum_blobs to
+ * zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must
+ * ignore these two fields if the property has a different type.
+ *
+ * User-space is expected to retrieve values and enums by performing this ioctl
+ * at least twice: the first time to retrieve the number of elements, the
+ * second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_values and @count_enum_blobs
+ * to zero, then call the ioctl. @count_values will be updated with the number
+ * of elements. If the property has the type &DRM_MODE_PROP_ENUM or
+ * &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well.
+ *
+ * To retrieve the elements themselves, allocate an array for @values_ptr and
+ * set @count_values to its capacity. If the property has the type
+ * &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for
+ * @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl
+ * again will fill the arrays.
+ */
+struct drm_mode_get_property {
+ /** @values_ptr: Pointer to a ``__u64`` array. */
+ __u64 values_ptr;
+ /** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */
+ __u64 enum_blob_ptr;
+
+ /**
+ * @prop_id: Object ID of the property which should be retrieved. Set
+ * by the caller.
+ */
+ __u32 prop_id;
+ /**
+ * @flags: ``DRM_MODE_PROP_*`` bitfield. See &drm_property.flags for
+ * a definition of the flags.
+ */
+ __u32 flags;
+ /**
+ * @name: Symbolic property name. User-space should use this field to
+ * recognize properties.
+ */
+ char name[DRM_PROP_NAME_LEN];
+
+ /** @count_values: Number of elements in @values_ptr. */
+ __u32 count_values;
+ /** @count_enum_blobs: Number of elements in @enum_blob_ptr. */
+ __u32 count_enum_blobs;
+};
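+
+/*
+ * Example (illustration only, not part of the upstream header): retrieving
+ * the enum entries of a property with the two-call pattern described
+ * above, assuming drmIoctl() from xf86drm.h; error handling is omitted.
+ */
+#if 0
+static int example_get_property_enums(int fd, uint32_t prop_id,
+                                      struct drm_mode_property_enum **enums,
+                                      uint32_t *count)
+{
+        struct drm_mode_get_property prop;
+
+        memset(&prop, 0, sizeof(prop));
+        prop.prop_id = prop_id;
+
+        /* First call: counts are zero, the kernel reports how many exist. */
+        if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
+                return -1;
+
+        if (!(prop.flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+                return -1;
+
+        *count = prop.count_enum_blobs;
+        *enums = calloc(*count, sizeof(**enums));
+        prop.enum_blob_ptr = (uintptr_t)*enums;
+        prop.count_values = 0;
+
+        /* Second call fills the array. */
+        return drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);
+}
+#endif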
+
+struct drm_mode_connector_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 connector_id;
+};
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_ANY 0
+
+struct drm_mode_obj_get_properties {
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u32 count_props;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_get_blob {
+ __u32 blob_id;
+ __u32 length;
+ __u64 data;
+};
+
+struct drm_mode_fb_cmd {
+ __u32 fb_id;
+ __u32 width;
+ __u32 height;
+ __u32 pitch;
+ __u32 bpp;
+ __u32 depth;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
+
+/**
+ * struct drm_mode_fb_cmd2 - Frame-buffer metadata.
+ *
+ * This struct holds frame-buffer metadata. There are two ways to use it:
+ *
+ * - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2
+ * ioctl to register a new frame-buffer. The new frame-buffer object ID will
+ * be set by the kernel in @fb_id.
+ * - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to
+ * fetch metadata about an existing frame-buffer.
+ *
+ * In case of planar formats, this struct allows up to 4 buffer objects with
+ * offsets and pitches per plane. The pitch and offset order are dictated by
+ * the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8-bit Y samples followed by an
+ * interleaved U/V plane containing 8-bit 2x2 subsampled colour difference
+ * samples.
+ *
+ * So it would consist of a Y plane at ``offsets[0]`` and a UV plane at
+ * ``offsets[1]``.
+ *
+ * To accommodate tiled, compressed, etc formats, a modifier can be specified.
+ * For more information see the "Format Modifiers" section. Note that even
+ * though it looks like we have a modifier per-plane, we in fact do not. The
+ * modifier for each plane must be identical. Thus all combinations of
+ * different data layouts for multi-plane formats must be enumerated as
+ * separate modifiers.
+ *
+ * All of the entries in @handles, @pitches, @offsets and @modifier must be
+ * zero when unused. Warning, for @offsets and @modifier zero can't be used to
+ * figure out whether the entry is used or not since it's a valid value (a zero
+ * offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR).
+ */
+struct drm_mode_fb_cmd2 {
+ /** @fb_id: Object ID of the frame-buffer. */
+ __u32 fb_id;
+ /** @width: Width of the frame-buffer. */
+ __u32 width;
+ /** @height: Height of the frame-buffer. */
+ __u32 height;
+ /**
+ * @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in
+ * ``drm_fourcc.h``.
+ */
+ __u32 pixel_format;
+ /**
+ * @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and
+ * &DRM_MODE_FB_MODIFIERS).
+ */
+ __u32 flags;
+
+ /**
+ * @handles: GEM buffer handle, one per plane. Set to 0 if the plane is
+ * unused. The same handle can be used for multiple planes.
+ */
+ __u32 handles[4];
+ /** @pitches: Pitch (aka. stride) in bytes, one per plane. */
+ __u32 pitches[4];
+ /** @offsets: Offset into the buffer in bytes, one per plane. */
+ __u32 offsets[4];
+ /**
+ * @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*``
+ * constants in ``drm_fourcc.h``. All planes must use the same
+ * modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags.
+ */
+ __u64 modifier[4];
+};
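+
+/*
+ * Example (illustration only, not part of the upstream header): registering
+ * an NV12 frame-buffer as described above. DRM_FORMAT_NV12 comes from
+ * drm_fourcc.h (not part of this tree) and bo_handle is assumed to be a
+ * GEM handle large enough to back both planes.
+ */
+#if 0
+static int example_addfb2_nv12(int fd, uint32_t bo_handle,
+                               uint32_t width, uint32_t height,
+                               uint32_t *fb_id)
+{
+        struct drm_mode_fb_cmd2 fb;
+
+        memset(&fb, 0, sizeof(fb));
+        fb.width = width;
+        fb.height = height;
+        fb.pixel_format = DRM_FORMAT_NV12;
+
+        /* Y plane, then the interleaved UV plane in the same buffer. */
+        fb.handles[0] = bo_handle;
+        fb.pitches[0] = width;
+        fb.offsets[0] = 0;
+        fb.handles[1] = bo_handle;
+        fb.pitches[1] = width;
+        fb.offsets[1] = width * height;
+
+        if (drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &fb))
+                return -1;
+        *fb_id = fb.fb_id;
+        return 0;
+}
+#endif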
+
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * when hardware or software draws to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; an annotation is a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions is half the num_clips given,
+ * where the clip rects are paired in src and dst. The width and
+ * height of each one of the pairs must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
+ * promises that the region specified by the clip rects is filled
+ * completely with a single color as given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ __u32 fb_id;
+ __u32 flags;
+ __u32 color;
+ __u32 num_clips;
+ __u64 clips_ptr;
+};
+
+struct drm_mode_mode_cmd {
+ __u32 connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO 0x01
+#define DRM_MODE_CURSOR_MOVE 0x02
+#define DRM_MODE_CURSOR_FLAGS 0x03
+
+/*
+ * depending on the value in flags different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc_id
+ * width
+ * height
+ * handle - if 0 turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc_id
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+struct drm_mode_cursor2 {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+ __s32 hot_x;
+ __s32 hot_y;
+};
+
+struct drm_mode_crtc_lut {
+ __u32 crtc_id;
+ __u32 gamma_size;
+
+ /* pointers to arrays */
+ __u64 red;
+ __u64 green;
+ __u64 blue;
+};
+
+struct drm_color_ctm {
+ /*
+ * Conversion matrix in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ *
+ * out matrix in
+ * |R| |0 1 2| |R|
+ * |G| = |3 4 5| x |G|
+ * |B| |6 7 8| |B|
+ */
+ __u64 matrix[9];
+};
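+
+/*
+ * Example (illustration only, not part of the upstream header): packing a
+ * double into the S31.32 sign-magnitude entries above. The sign lives in
+ * bit 63 and the magnitude is the absolute value scaled by 2^32.
+ */
+#if 0
+static __u64 example_ctm_entry(double v)
+{
+        __u64 sign = v < 0 ? (1ULL << 63) : 0;
+        double mag = v < 0 ? -v : v;
+
+        return sign | (__u64)(mag * (double)(1ULL << 32));
+}
+#endif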
+
+struct drm_color_lut {
+ /*
+ * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
+ * 0xffff == 1.0.
+ */
+ __u16 red;
+ __u16 green;
+ __u16 blue;
+ __u16 reserved;
+};
+
+/**
+ * struct drm_plane_size_hint - Plane size hints
+ *
+ * The plane SIZE_HINTS property blob contains an
+ * array of struct drm_plane_size_hint.
+ */
+struct drm_plane_size_hint {
+ __u16 width;
+ __u16 height;
+};
+
+/**
+ * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
+ *
+ * HDR Metadata Infoframe as per CTA 861.G spec. This is expected
+ * to match exactly with the spec.
+ *
+ * Userspace is expected to pass the metadata information as per
+ * the format described in this structure.
+ */
+struct hdr_metadata_infoframe {
+ /**
+ * @eotf: Electro-Optical Transfer Function (EOTF)
+ * used in the stream.
+ */
+ __u8 eotf;
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u8 metadata_type;
+ /**
+ * @display_primaries: Color Primaries of the Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+ * @display_primaries.x: X coordinate of color primary.
+ * @display_primaries.y: Y coordinate of color primary.
+ */
+ struct {
+ __u16 x, y;
+ } display_primaries[3];
+ /**
+ * @white_point: White Point of Colorspace Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+ * @white_point.x: X coordinate of whitepoint of color primary.
+ * @white_point.y: Y coordinate of whitepoint of color primary.
+ */
+ struct {
+ __u16 x, y;
+ } white_point;
+ /**
+ * @max_display_mastering_luminance: Max Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_display_mastering_luminance;
+ /**
+ * @min_display_mastering_luminance: Min Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of
+ * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
+ * represents 6.5535 cd/m2.
+ */
+ __u16 min_display_mastering_luminance;
+ /**
+ * @max_cll: Max Content Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_cll;
+ /**
+ * @max_fall: Max Frame Average Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_fall;
+};
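+
+/*
+ * Example (illustration only, not part of the upstream header): encoding a
+ * CIE xy chromaticity into the 0.00002-unit fields above. 1.0 maps to
+ * 0xC350 (50000), so e.g. x = 0.3127 encodes as 15635.
+ */
+#if 0
+static __u16 example_hdr_chromaticity(double xy)
+{
+        return (__u16)(xy * 50000.0 + 0.5);
+}
+#endif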
+
+/**
+ * struct hdr_output_metadata - HDR output metadata
+ *
+ * Metadata Information to be passed from userspace
+ */
+struct hdr_output_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_metadata_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_metadata_infoframe hdmi_metadata_type1;
+ };
+};
+
+/**
+ * DRM_MODE_PAGE_FLIP_EVENT
+ *
+ * Request that the kernel sends back a vblank event (see
+ * struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the
+ * page-flip is done.
+ */
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+/**
+ * DRM_MODE_PAGE_FLIP_ASYNC
+ *
+ * Request that the page-flip is performed as soon as possible, ie. with no
+ * delay due to waiting for vblank. This may cause tearing to be visible on
+ * the screen.
+ *
+ * When used with atomic uAPI, the driver will return an error if the hardware
+ * doesn't support performing an asynchronous page-flip for this update.
+ * User-space should handle this, e.g. by falling back to a regular page-flip.
+ *
+ * Note, some hardware might need to perform one last synchronous page-flip
+ * before being able to switch to asynchronous page-flips. As an exception,
+ * the driver will return success even though that first page-flip is not
+ * asynchronous.
+ */
+#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
+#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
+#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
+#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
+ DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
+/**
+ * DRM_MODE_PAGE_FLIP_FLAGS
+ *
+ * Bitmask of flags suitable for &drm_mode_crtc_page_flip_target.flags.
+ */
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
+ DRM_MODE_PAGE_FLIP_ASYNC | \
+ DRM_MODE_PAGE_FLIP_TARGET)
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
+ * event (see drm.h: struct drm_event_vblank) when the page flip is
+ * done. The user_data field passed in with this ioctl will be
+ * returned as the user_data field in the vblank event struct.
+ *
+ * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
+ * 'as soon as possible', meaning that it does not delay waiting for vblank.
+ * This may cause tearing on the screen.
+ *
+ * The reserved field must be zero.
+ */
+
+struct drm_mode_crtc_page_flip {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 reserved;
+ __u64 user_data;
+};
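+
+/*
+ * Example (illustration only, not part of the upstream header): requesting
+ * a page flip with a completion event as described above, assuming
+ * drmIoctl() from xf86drm.h. user_data comes back in the resulting
+ * struct drm_event_vblank so the caller can match event to request.
+ */
+#if 0
+static int example_page_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
+                             uint64_t user_data)
+{
+        struct drm_mode_crtc_page_flip flip = {
+                .crtc_id = crtc_id,
+                .fb_id = fb_id,
+                .flags = DRM_MODE_PAGE_FLIP_EVENT,
+                .user_data = user_data,
+        };
+
+        /* Fails with EBUSY if a flip is already pending on this crtc. */
+        return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
+}
+#endif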
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * Same as struct drm_mode_crtc_page_flip, but supports new flags and
+ * re-purposes the reserved field:
+ *
+ * The sequence field must be zero unless either of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
+ * the ABSOLUTE flag is specified, the sequence field denotes the absolute
+ * vblank sequence when the flip should take effect. When the RELATIVE
+ * flag is specified, the sequence field denotes the relative (to the
+ * current one when the ioctl is called) vblank sequence when the flip
+ * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
+ * make sure the vblank sequence before the target one has passed before
+ * calling this ioctl. The purpose of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
+ * the target for when code dealing with a page flip runs during a
+ * vertical blank period.
+ */
+
+struct drm_mode_crtc_page_flip_target {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 sequence;
+ __u64 user_data;
+};
+
+/**
+ * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
+ * @height: buffer height in pixels
+ * @width: buffer width in pixels
+ * @bpp: bits per pixel
+ * @flags: must be zero
+ * @handle: buffer object handle
+ * @pitch: number of bytes between two consecutive lines
+ * @size: size of the whole buffer in bytes
+ *
+ * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
+ * the kernel fills @handle, @pitch and @size.
+ */
+struct drm_mode_create_dumb {
+ __u32 height;
+ __u32 width;
+ __u32 bpp;
+ __u32 flags;
+
+ __u32 handle;
+ __u32 pitch;
+ __u64 size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
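+
+/*
+ * Example (illustration only, not part of the upstream header): the usual
+ * dumb-buffer flow -- create, ask for the fake mmap offset, then mmap
+ * through the DRM fd. A 32bpp buffer is assumed and error handling is
+ * reduced to the bare minimum.
+ */
+#if 0
+static void *example_create_and_map_dumb(int fd, uint32_t width,
+                                         uint32_t height, uint32_t *handle,
+                                         uint32_t *pitch)
+{
+        struct drm_mode_create_dumb create = {
+                .width = width,
+                .height = height,
+                .bpp = 32,
+        };
+        struct drm_mode_map_dumb map = { 0 };
+
+        if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
+                return NULL;
+
+        map.handle = create.handle;
+        if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
+                return NULL;
+
+        *handle = create.handle;
+        *pitch = create.pitch;
+        return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                    fd, map.offset);
+}
+#endif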
+
+struct drm_mode_destroy_dumb {
+ __u32 handle;
+};
+
+/**
+ * DRM_MODE_ATOMIC_TEST_ONLY
+ *
+ * Do not apply the atomic commit, instead check whether the hardware supports
+ * this configuration.
+ *
+ * See &drm_mode_config_funcs.atomic_check for more details on test-only
+ * commits.
+ */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+/**
+ * DRM_MODE_ATOMIC_NONBLOCK
+ *
+ * Do not block while applying the atomic commit. The &DRM_IOCTL_MODE_ATOMIC
+ * IOCTL returns immediately instead of waiting for the changes to be applied
+ * in hardware. Note, the driver will still check that the update can be
+ * applied before returning.
+ */
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+/**
+ * DRM_MODE_ATOMIC_ALLOW_MODESET
+ *
+ * Allow the update to result in temporary or transient visible artifacts while
+ * the update is being applied. Applying the update may also take significantly
+ * more time than a page flip. All visual artifacts will disappear by the time
+ * the update is completed, as signalled through the vblank event's timestamp
+ * (see struct drm_event_vblank).
+ *
+ * This flag must be set when the KMS update might cause visible artifacts.
+ * Without this flag, such a KMS update will return an EINVAL error. What kind of
+ * update may cause visible artifacts depends on the driver and the hardware.
+ * User-space that needs to know beforehand if an update might cause visible
+ * artifacts can use &DRM_MODE_ATOMIC_TEST_ONLY without
+ * &DRM_MODE_ATOMIC_ALLOW_MODESET to see if it fails.
+ *
+ * To the best of the driver's knowledge, visual artifacts are guaranteed to
+ * not appear when this flag is not set. Some sinks might display visual
+ * artifacts outside of the driver's control.
+ */
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+/**
+ * DRM_MODE_ATOMIC_FLAGS
+ *
+ * Bitfield of flags accepted by the &DRM_IOCTL_MODE_ATOMIC IOCTL in
+ * &drm_mode_atomic.flags.
+ */
+#define DRM_MODE_ATOMIC_FLAGS (\
+ DRM_MODE_PAGE_FLIP_EVENT |\
+ DRM_MODE_PAGE_FLIP_ASYNC |\
+ DRM_MODE_ATOMIC_TEST_ONLY |\
+ DRM_MODE_ATOMIC_NONBLOCK |\
+ DRM_MODE_ATOMIC_ALLOW_MODESET)
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
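+
+/*
+ * Example (illustration only, not part of the upstream header): setting a
+ * single property on a single object through DRM_IOCTL_MODE_ATOMIC, first
+ * as a TEST_ONLY commit and then for real, assuming drmIoctl() from
+ * xf86drm.h and a client that has enabled the atomic client cap.
+ * objs_ptr holds object IDs, count_props_ptr the per-object property
+ * counts, props_ptr/prop_values_ptr the flattened ID/value pairs.
+ */
+#if 0
+static int example_atomic_set_one(int fd, uint32_t obj_id,
+                                  uint32_t prop_id, uint64_t value)
+{
+        uint32_t objs[1] = { obj_id };
+        uint32_t count_props[1] = { 1 };
+        uint32_t props[1] = { prop_id };
+        uint64_t values[1] = { value };
+        struct drm_mode_atomic req = {
+                .flags = DRM_MODE_ATOMIC_TEST_ONLY,
+                .count_objs = 1,
+                .objs_ptr = (uintptr_t)objs,
+                .count_props_ptr = (uintptr_t)count_props,
+                .props_ptr = (uintptr_t)props,
+                .prop_values_ptr = (uintptr_t)values,
+        };
+
+        /* Check first whether the configuration is accepted at all. */
+        if (drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &req))
+                return -1;
+
+        req.flags = 0;
+        return drmIoctl(fd, DRM_IOCTL_MODE_ATOMIC, &req);
+}
+#endif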
+
+struct drm_format_modifier_blob {
+#define FORMAT_BLOB_CURRENT 1
+ /* Version of this blob format */
+ __u32 version;
+
+ /* Flags */
+ __u32 flags;
+
+ /* Number of fourcc formats supported */
+ __u32 count_formats;
+
+ /* Where in this blob the formats exist (in bytes) */
+ __u32 formats_offset;
+
+ /* Number of drm_format_modifiers */
+ __u32 count_modifiers;
+
+ /* Where in this blob the modifiers exist (in bytes) */
+ __u32 modifiers_offset;
+
+ /* __u32 formats[] */
+ /* struct drm_format_modifier modifiers[] */
+};
+
+struct drm_format_modifier {
+ /* Bitmask of formats in the get_plane format list this info applies to.
+ * The offset selects which 64 formats (bits) the mask covers, acting as a
+ * sliding window over the format list.
+ *
+ * Some examples:
+ * With fewer than 65 formats, where formats 0 and 2 are
+ * supported:
+ * 0x0000000000000005
+ * ^-offset = 0, formats = 5
+ *
+ * If the number of formats grew to 128, and formats 98-102 are
+ * supported with the modifier:
+ *
+ * 0x0000007c00000000 0000000000000000
+ * ^
+ * |__offset = 64, formats = 0x7c00000000
+ *
+ */
+ __u64 formats;
+ __u32 offset;
+ __u32 pad;
+
+ /* The modifier that applies to the get_plane format list bitmask above. */
+ __u64 modifier;
+};
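+
+/*
+ * Example (illustration only, not part of the upstream header): testing
+ * whether format index idx of the plane's format list is covered by one
+ * struct drm_format_modifier entry, following the offset/bitmask scheme
+ * described above.
+ */
+#if 0
+static int example_modifier_supports_format(const struct drm_format_modifier *mod,
+                                            unsigned int idx)
+{
+        if (idx < mod->offset || idx >= mod->offset + 64)
+                return 0;
+        return (mod->formats >> (idx - mod->offset)) & 1;
+}
+#endif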
+
+/**
+ * struct drm_mode_create_blob - Create New blob property
+ *
+ * Create a new 'blob' data property, copying length bytes from data pointer,
+ * and returning new blob ID.
+ */
+struct drm_mode_create_blob {
+ /** @data: Pointer to data to copy. */
+ __u64 data;
+ /** @length: Length of data to copy. */
+ __u32 length;
+ /** @blob_id: Return: new property ID. */
+ __u32 blob_id;
+};
+
+/**
+ * struct drm_mode_destroy_blob - Destroy user blob
+ * @blob_id: blob_id to destroy
+ *
+ * Destroy a user-created blob property.
+ *
+ * User-space can release blobs as soon as they do not need to refer to them by
+ * their blob object ID. For instance, if you are using a MODE_ID blob in an
+ * atomic commit and you will not make another commit re-using the same ID, you
+ * can destroy the blob as soon as the commit has been issued, without waiting
+ * for it to complete.
+ */
+struct drm_mode_destroy_blob {
+ __u32 blob_id;
+};
+
+/**
+ * struct drm_mode_create_lease - Create lease
+ *
+ * Lease mode resources, creating another drm_master.
+ *
+ * The @object_ids array must reference at least one CRTC, one connector and
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
+ * the lease can be completely empty.
+ */
+struct drm_mode_create_lease {
+ /** @object_ids: Pointer to array of object ids (__u32) */
+ __u64 object_ids;
+ /** @object_count: Number of object ids */
+ __u32 object_count;
+ /** @flags: flags for new FD (O_CLOEXEC, etc) */
+ __u32 flags;
+
+ /** @lessee_id: Return: unique identifier for lessee. */
+ __u32 lessee_id;
+ /** @fd: Return: file descriptor to new drm_master file */
+ __u32 fd;
+};
+
+/**
+ * struct drm_mode_list_lessees - List lessees
+ *
+ * List lessees from a drm_master.
+ */
+struct drm_mode_list_lessees {
+ /**
+ * @count_lessees: Number of lessees.
+ *
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_lessees;
+ /** @pad: Padding. */
+ __u32 pad;
+
+ /**
+ * @lessees_ptr: Pointer to lessees.
+ *
+ * Pointer to __u64 array of lessee ids
+ */
+ __u64 lessees_ptr;
+};
+
+/**
+ * struct drm_mode_get_lease - Get Lease
+ *
+ * Get leased objects.
+ */
+struct drm_mode_get_lease {
+ /**
+ * @count_objects: Number of leased objects.
+ *
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_objects;
+ /** @pad: Padding. */
+ __u32 pad;
+
+ /**
+ * @objects_ptr: Pointer to objects.
+ *
+ * Pointer to __u32 array of object ids.
+ */
+ __u64 objects_ptr;
+};
+
+/**
+ * struct drm_mode_revoke_lease - Revoke lease
+ */
+struct drm_mode_revoke_lease {
+ /** @lessee_id: Unique ID of lessee */
+ __u32 lessee_id;
+};
+
+/**
+ * struct drm_mode_rect - Two dimensional rectangle.
+ * @x1: Horizontal starting coordinate (inclusive).
+ * @y1: Vertical starting coordinate (inclusive).
+ * @x2: Horizontal ending coordinate (exclusive).
+ * @y2: Vertical ending coordinate (exclusive).
+ *
+ * The drm subsystem uses struct drm_rect to manage rectangular areas
+ * internally; this structure exports it to user-space.
+ *
+ * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS.
+ */
+struct drm_mode_rect {
+ __s32 x1;
+ __s32 y1;
+ __s32 x2;
+ __s32 y2;
+};
+
+/**
+ * struct drm_mode_closefb
+ * @fb_id: Framebuffer ID.
+ * @pad: Must be zero.
+ */
+struct drm_mode_closefb {
+ __u32 fb_id;
+ __u32 pad;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/src/drvemu.c b/src/drvemu.c
new file mode 100644
index 0000000..75592f7
--- /dev/null
+++ b/src/drvemu.c
@@ -0,0 +1,530 @@
+/* RTLD_NEXT below needs _GNU_SOURCE; define it here in case the build
+ * system does not pass it on the command line. */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <string.h>
+#include <assert.h>
+#include <dlfcn.h>
+
+#include <dirent.h>
+#include <unistd.h> /* getpid() */
+
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include <asm-generic/ioctl.h>
+
+#include "xf86drm.h"
+
+#define DRM_PVR_RESERVED1 (DRM_COMMAND_BASE + 0)
+#define DRM_PVR_RESERVED2 (DRM_COMMAND_BASE + 1)
+#define DRM_PVR_RESERVED3 (DRM_COMMAND_BASE + 2)
+#define DRM_PVR_RESERVED4 (DRM_COMMAND_BASE + 3)
+#define DRM_PVR_RESERVED5 (DRM_COMMAND_BASE + 4)
+#define DRM_PVR_RESERVED6 (DRM_COMMAND_BASE + 5)
+
+/* PVR includes */
+#define SUPPORT_MEMINFO_IDS
+#define SUPPORT_DRI_DRM_EXT
+#include <config_kernel.h>
+#include <sgxfeaturedefs.h>
+#include <pvr_bridge.h>
+#include <sgx_bridge.h>
+#include <pvr_drm_shared.h>
+#include <sgxconfig.h>
+#include <sgx_mkif_km.h>
+#include <sgx_options.h>
+#include <sgxapi_km.h>
+
+#define DRM_IOCTL_PVR_SRVKM DRM_IOWR(PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
+#include "pvr_ioctl.h"
+
+#define MAX_ALLOCATIONS 1024
+
+#define LOG(msg...) fprintf(stderr, "[DRVEMU] " msg)
+
+struct memory_allocation {
+ bool allocated;
+ uint32_t device_ptr;
+ void *cpu_ptr;
+ size_t size;
+};
+
+static struct memory_allocation allocations[MAX_ALLOCATIONS];
+
+static int false_fd = 10241024;
+
+static int alloc_memory(int heap, size_t size) {
+ for (size_t i = 0; i < MAX_ALLOCATIONS; i++) {
+ if (!allocations[i].allocated) {
+ struct memory_allocation *mem = &allocations[i];
+ mem->allocated = true;
+ // TODO use heaps to get right device addr
+ mem->device_ptr = 0;
+ mem->cpu_ptr = malloc(size);
+ mem->size = size;
+ return i+1;
+ }
+ }
+
+ assert(0);
+ return -1;
+}
+
+#define PROLOG(func) \
+ static typeof(func) *orig_##func = NULL; \
+ if(!orig_##func) \
+ orig_##func = dlsym(RTLD_NEXT, #func);
+
+
+int open64(const char *pathname, int flags, ...) {
+ LOG("Called open64 on %s (%d)\n", pathname, flags);
+ PROLOG(open64);
+
+ if (strcmp(pathname, "/dev/dri/renderD128") == 0) {
+ LOG("Spoofing FD!\n");
+ return false_fd;
+ }
+
+ int fd = orig_open64(pathname, flags);
+ return fd;
+}
+
+int close(int fd) {
+ LOG("Close called on %d\n", fd);
+ PROLOG(close);
+
+ if (fd != false_fd)
+ return orig_close(fd);
+
+ return 0;
+}
+
+int fcntl(int fd, int op, int arg) {
+ LOG("Called fcntl on %d\n", fd);
+ PROLOG(fcntl);
+
+ if (fd == false_fd)
+ return 0;
+
+ return orig_fcntl(fd, op, arg);
+}
+
+PVRSRV_HEAP_INFO pvr_heaps[] = {
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID),
+ .hDevMemHeap = (void*)1,
+ .sDevVAddrBase = {SGX_GENERAL_HEAP_BASE},
+ .ui32HeapByteSize = SGX_GENERAL_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID),
+ .hDevMemHeap = (void*)2,
+ .sDevVAddrBase = {SGX_TADATA_HEAP_BASE},
+ .ui32HeapByteSize = SGX_TADATA_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID),
+ .hDevMemHeap = (void*)3,
+ .sDevVAddrBase = {SGX_KERNEL_CODE_HEAP_BASE},
+ .ui32HeapByteSize = SGX_KERNEL_CODE_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID),
+ .hDevMemHeap = (void*)4,
+ .sDevVAddrBase = {SGX_KERNEL_DATA_HEAP_BASE},
+ .ui32HeapByteSize = SGX_KERNEL_DATA_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID),
+ .hDevMemHeap = (void*)5,
+ .sDevVAddrBase = {SGX_PIXELSHADER_HEAP_BASE},
+ .ui32HeapByteSize = SGX_PIXELSHADER_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID),
+ .hDevMemHeap = (void*)6,
+ .sDevVAddrBase = {SGX_VERTEXSHADER_HEAP_BASE},
+ .ui32HeapByteSize = SGX_VERTEXSHADER_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID),
+ .hDevMemHeap = (void*)7,
+ .sDevVAddrBase = {SGX_PDSPIXEL_CODEDATA_HEAP_BASE},
+ .ui32HeapByteSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID),
+ .hDevMemHeap = (void*)8,
+ .sDevVAddrBase = {SGX_PDSVERTEX_CODEDATA_HEAP_BASE},
+ .ui32HeapByteSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID),
+ .hDevMemHeap = (void*)9,
+ .sDevVAddrBase = {SGX_SYNCINFO_HEAP_BASE},
+ .ui32HeapByteSize = SGX_SYNCINFO_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SHARED_3DPARAMETERS_HEAP_ID),
+ .hDevMemHeap = (void*)10,
+ .sDevVAddrBase = {SGX_SHARED_3DPARAMETERS_HEAP_BASE},
+ .ui32HeapByteSize = SGX_SHARED_3DPARAMETERS_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_MULTI_PROCESS,
+ },
+ {
+ .ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID),
+ .hDevMemHeap = (void*)11,
+ .sDevVAddrBase = {SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE},
+ .ui32HeapByteSize = SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE,
+ .ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_MEM_RAM_BACKED_ALLOCATION | PVRSRV_HAP_SINGLE_PROCESS,
+ },
+};
+
+#define DEV_NAME "pvr"
+#define DEV_DATE "20110701"
+#define DEV_DESC "Imagination Technologies PVR DRM"
+
+void get_misc_info(SGX_MISC_INFO *info) {
+ switch(info->eRequest) {
+ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
+ info->uData.sSGXFeatures.ui32BuildOptions = SGX_BUILD_OPTIONS;
+ info->uData.sSGXFeatures.ui32DDKBuild = 3759903;
+ info->uData.sSGXFeatures.ui32DDKVersion = (1 << 16) | (14 << 8);
+ break;
+ case SGX_MISC_INFO_REQUEST_SGXREV:
+ // TODO need to get this from real hardware
+ info->uData.sSGXFeatures.ui32BuildOptions = SGX_BUILD_OPTIONS;
+ info->uData.sSGXFeatures.ui32DDKBuild = 3759903;
+ info->uData.sSGXFeatures.ui32DDKVersion = (1 << 16) | (14 << 8);
+ break;
+ case SGX_MISC_INFO_REQUEST_CLOCKSPEED_SLCSIZE:
+ info->uData.sQueryClockSpeedSLCSize.ui32SGXClockSpeed = SYS_SGX_CLOCK_SPEED;
+ break;
+ default:
+ LOG("Unimplemented misc req %d\n", info->eRequest);
+ assert(false);
+ break;
+ }
+}
+
+static bool pvrsrv_ioctl(int fd, PVRSRV_BRIDGE_PACKAGE *bridge_package) {
+ int ioctl_nr = _IOC_NR(bridge_package->ui32BridgeID);
+ LOG(">>> pvr_ioctl(%s)(0x%x)\n", pvrsrv_ioctl_names[ioctl_nr], bridge_package->ui32BridgeID);
+ switch(ioctl_nr) {
+ case _IOC_NR(PVRSRV_BRIDGE_CONNECT_SERVICES): {
+ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *data = bridge_package->pvParamOut;
+ data->eError = PVRSRV_OK;
+ data->hKernelServices = (void*)0x2;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_ENUM_DEVICES): {
+ PVRSRV_BRIDGE_OUT_ENUMDEVICE *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->ui32NumDevices = 1;
+ out->asDeviceIdentifier[0].eDeviceType = PVRSRV_DEVICE_TYPE_SGX;
+ out->asDeviceIdentifier[0].eDeviceClass = PVRSRV_DEVICE_CLASS_3D;
+ out->asDeviceIdentifier[0].ui32DeviceIndex = 0;
+ out->asDeviceIdentifier[0].pszPDumpDevName = NULL;
+ out->asDeviceIdentifier[0].pszPDumpRegName = NULL;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO): {
+ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->hDevCookie = (void*)0x1;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_SGX_GETMISCINFO): {
+ LOG("Get misc info!\n");
+ PVRSRV_BRIDGE_IN_SGXGETMISCINFO *in = bridge_package->pvParamIn;
+ PVRSRV_BRIDGE_RETURN *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ get_misc_info(in->psMiscInfo);
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_SGX_GETCLIENTINFO): {
+ PVRSRV_BRIDGE_OUT_GETCLIENTINFO *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->sClientInfo.ui32ProcessID = getpid();
+ out->sClientInfo.pvProcess = NULL;
+ PVRSRV_MISC_INFO *misc = &out->sClientInfo.sMiscInfo;
+ misc->ui32StatePresent = 0;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_ENUM_CLASS): {
+ PVRSRV_BRIDGE_OUT_ENUMDEVICE *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->ui32NumDevices = 1;
+ out->asDeviceIdentifier[0].eDeviceType = PVRSRV_DEVICE_TYPE_SGX;
+ out->asDeviceIdentifier[0].eDeviceClass = PVRSRV_DEVICE_CLASS_3D;
+ out->asDeviceIdentifier[0].ui32DeviceIndex = 0;
+ out->asDeviceIdentifier[0].pszPDumpDevName = "";
+ out->asDeviceIdentifier[0].pszPDumpRegName = "";
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT): {
+ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->hDevMemContext = (void*)0x2;
+ out->ui32ClientHeapCount = sizeof(pvr_heaps)/sizeof(*pvr_heaps);
+ memcpy(out->sHeapInfo, pvr_heaps, sizeof(pvr_heaps));
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE): {
+ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->hDeviceKM = (void*)0x1;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_GET_DISPCLASS_INFO): {
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->sDisplayInfo.ui32MaxSwapChains = 3;
+ out->sDisplayInfo.ui32MaxSwapChainBuffers = 3;
+ out->sDisplayInfo.ui32MinSwapInterval = 0;
+ out->sDisplayInfo.ui32MaxSwapInterval = 1;
+ out->sDisplayInfo.ui32PhysicalWidthmm = 256;
+ out->sDisplayInfo.ui32PhysicalHeightmm = 256;
+ strcpy(out->sDisplayInfo.szDisplayName, "Display");
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS): {
+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->ui32Count = 1;
+ out->asFormat[0].pixelformat = PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS): {
+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->ui32Count = 1;
+ out->asDim[0].ui32ByteStride = 256;
+ out->asDim[0].ui32Width = 256;
+ out->asDim[0].ui32Height = 256;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER): {
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->hBuffer = (void*)alloc_memory(0, 256*256*4);
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY): {
+ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *in = bridge_package->pvParamIn;
+ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *out = bridge_package->pvParamOut;
+ LOG("Attempting map 0x%x\n", (uint32_t)in->hDeviceClassBuffer);
+ out->eError = PVRSRV_OK;
+ out->sClientMemInfo.hMappingInfo = (void*)alloc_memory(0, 256*256*4);
+ struct memory_allocation *mem = &allocations[((size_t)out->sClientMemInfo.hMappingInfo) - 1];
+ out->sClientMemInfo.pvLinAddr = mem->cpu_ptr;
+ out->sClientMemInfo.pvLinAddrKM = mem->cpu_ptr;
+ out->sClientMemInfo.uAllocSize = mem->size;
+ out->sClientMemInfo.hKernelMemInfo = out->sClientMemInfo.hMappingInfo;
+ out->sClientMemInfo.hResItem = out->sClientMemInfo.hMappingInfo;
+ out->sClientMemInfo.psNext = NULL;
+
+ out->sClientSyncInfo.hMappingInfo = (void*)alloc_memory(0, 1024);
+ out->sClientSyncInfo.psSyncData = NULL;
+ out->sClientSyncInfo.sWriteOpsCompleteDevVAddr.uiAddr = 0xDEADBEEF;
+ out->sClientSyncInfo.sReadOpsCompleteDevVAddr.uiAddr = (uintptr_t)out->sClientSyncInfo.hMappingInfo;
+ out->sClientSyncInfo.sReadOps2CompleteDevVAddr.uiAddr = 0xDEADBEF1;
+ out->sClientSyncInfo.hKernelSyncInfo = out->sClientSyncInfo.hMappingInfo;
+
+ out->psKernelMemInfo = NULL;
+ out->hMappingInfo = out->sClientMemInfo.hMappingInfo;
+ LOG("Allocated 0x%x\n", (uint32_t)out->sClientMemInfo.hMappingInfo);
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA): {
+ PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *in = bridge_package->pvParamIn;
+ PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *out = bridge_package->pvParamOut;
+ size_t handle = (size_t)in->hMHandle;
+ LOG("Attempting mhandle map 0x%x\n", handle);
+ if (handle - 1 < MAX_ALLOCATIONS) {
+ struct memory_allocation *mem = &allocations[handle - 1];
+ out->eError = PVRSRV_OK;
+ out->uiMMapOffset = (uintptr_t)mem->cpu_ptr;
+ out->uiByteOffset = 0;
+ out->uiRealByteSize = mem->size;
+ out->uiUserVAddr = (uintptr_t)mem->cpu_ptr;
+ } else {
+ out->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN): {
+ PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ // TODO figure out what this is
+ out->ui32ReadOpsPending = 0;
+ out->ui32WriteOpsPending = 0;
+ out->ui32ReadOps2Pending = 0;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN): {
+ PVRSRV_BRIDGE_RETURN *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_RELEASE_MMAP_DATA): {
+ PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ // TODO figure out what this is
+ out->bMUnmap = false;
+ break;
+ }
+ case _IOC_NR(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
+ case _IOC_NR(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO):
+ case _IOC_NR(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE):
+ case _IOC_NR(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY):
+ case _IOC_NR(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT): {
+ PVRSRV_BRIDGE_RETURN *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ break;
+ }
+#if 0
+ case _IOC_NR(PVRSRV_BRIDGE_SGX_DEVINITPART2): {
+ PVRSRV_BRIDGE_OUT_SGXDEVINITPART2 *out = bridge_package->pvParamOut;
+ out->eError = PVRSRV_OK;
+ out->ui32KMBuildOptions = SGX_BUILD_OPTIONS;
+ printf("Input size is 0x%x\n", bridge_package->ui32InBufferSize);
+ printf("Build options 0x%x\n", SGX_BUILD_OPTIONS);
+ //out->ui32KMBuildOptions = 0x36a118;
+ break;
+ }
+#endif
+ default:
+ LOG("Unimplemented pvrsrv ioctl %d, may be %s\n", ioctl_nr, pvrsrv_ioctl_names[ioctl_nr]);
+ assert(false);
+ break;
+ }
+ return false;
+}
+
+static bool spoof_ioctl(int fd, int request, void *ptr) {
+ int ioctl_nr = _IOC_NR(request);
+ switch(ioctl_nr) {
+ case _IOC_NR(DRM_IOCTL_VERSION): {
+ LOG("Spoofing device info\n");
+ drmVersionPtr version = (drmVersionPtr)ptr;
+ version->version_major = 1;
+ version->version_minor = 13;
+ version->version_patchlevel = 3341330;
+ version->name_len = sizeof(DEV_NAME);
+ version->date_len = sizeof(DEV_DATE);
+ version->desc_len = sizeof(DEV_DESC);
+
+ if (version->name)
+ strcpy(version->name, DEV_NAME);
+ if (version->date)
+ strcpy(version->date, DEV_DATE);
+ if (version->desc)
+ strcpy(version->desc, DEV_DESC);
+
+ break;
+ }
+
+#if 0
+ case _IOC_NR(DRM_IOCTL_GET_MAGIC):
+ printf(">>> ioctl(DRM_IOCTL_GET_MAGIC)\n");
+ break;
+ case _IOC_NR(DRM_IOCTL_GET_UNIQUE):
+ printf(">>> ioctl(DRM_IOCTL_GET_UNIQUE)\n");
+ break;
+ case _IOC_NR(DRM_IOCTL_SET_VERSION):
+ printf(">>> ioctl(DRM_IOCTL_SET_VERSION)\n");
+ {
+ struct drm_set_version *data = ptr;
+ printf("\t%d %d %d %d\n", data->drm_di_major,
+ data->drm_di_minor,
+ data->drm_dd_major,
+ data->drm_dd_minor);
+ }
+ break;
+#endif
+ case _IOC_NR(DRM_IOCTL_DROP_MASTER):
+ LOG(">>> ioctl(DRM_IOCTL_DROP_MASTER)\n");
+ return 0;
+ case PVR_DRM_SRVKM_CMD:
+ //fwrite(ptr, 1, sizeof(PVRSRV_BRIDGE_PACKAGE), log_file);
+ //PPRINT(stdout, ptr, PVRSRV_BRIDGE_PACKAGE);
+ return pvrsrv_ioctl(fd, ptr);
+ break;
+ case PVR_DRM_IS_MASTER_CMD:
+ LOG(">>> ioctl(PVR_DRM_IS_MASTER_CMD) 0x%x\n", PVR_DRM_SRVKM_CMD);
+ /* From KMD source code this seems to always return 0 */
+ break;
+ default:
+ LOG("Unimplemented ioctl 0x%x\n", ioctl_nr);
+ assert(false);
+ break;
+ }
+ return 0;
+}
+
+int ioctl(int fd, int request, ...) {
+ PROLOG(ioctl);
+ int ioc_size = _IOC_SIZE(request);
+ bool pvr = fd == false_fd;
+
+ //printf("Size is %d\n", ioc_size);
+ void *ptr = NULL;
+ if(ioc_size) {
+ va_list args;
+ va_start(args, request);
+ ptr = va_arg(args, void *);
+ va_end(args);
+ }
+
+ LOG("Got ioctl %d!\n", fd);
+
+ if (pvr)
+ return spoof_ioctl(fd, request, ptr);
+ else
+ return orig_ioctl(fd, request, ptr);
+}
+
+int open(const char *pathname, int flags, mode_t mode) {
+ LOG("Called open on %s (%d)\n", pathname, flags);
+ PROLOG(open);
+ return orig_open(pathname, flags, mode);
+}
+
+int openat(int dirfd, const char *pathname, int flags, ...) {
+ LOG("Called openat on %s (%d) (%d)\n", pathname, dirfd, flags);
+ PROLOG(openat);
+
+ return orig_openat(dirfd, pathname, flags);
+}
+
+int openat64(int fd, const char * path, int oflag, ...) {
+ LOG("Called openat64 %s\n", path);
+ PROLOG(openat64);
+ return orig_openat64(fd, path, oflag);
+}
+
+DIR *opendir(const char *dirname) {
+ LOG("Opening dir %s\n", dirname);
+ PROLOG(opendir);
+ return orig_opendir(dirname);
+}
+
+struct dirent *readdir(DIR *dirp) {
+ PROLOG(readdir);
+ struct dirent *out = orig_readdir(dirp);
+ if (out) {
+ //printf("Reading %d %d %s\n", (int)out->d_type, (int)out->d_reclen, (char*)out->d_name);
+ }
+ return out;
+}
diff --git a/src/pvr_ioctl.h b/src/pvr_ioctl.h
new file mode 100644
index 0000000..5802079
--- /dev/null
+++ b/src/pvr_ioctl.h
@@ -0,0 +1,20 @@
+#ifndef PVR_IOCTL_H
+#define PVR_IOCTL_H
+
+#if 0
+#define PVRSRV_IOWR(INDEX) (INDEX)
+#define PVRSRV_IOCTL(NAME, VALUE) NAME = (VALUE),
+
+enum pvrsrv_ioctl {
+#include "pvr_ioctl.inc"
+};
+
+#undef PVRSRV_IOCTL
+#endif
+#define PVRSRV_IOCTL(NAME, VALUE) [ _IOC_NR(NAME) ] = #NAME,
+
+const char *pvrsrv_ioctl_names[] = {
+#include "pvr_ioctl.inc"
+};
+
+#endif
diff --git a/src/pvr_ioctl.inc b/src/pvr_ioctl.inc
new file mode 100644
index 0000000..094299f
--- /dev/null
+++ b/src/pvr_ioctl.inc
@@ -0,0 +1,251 @@
+
+#define PVRSRV_BRIDGE_UMKM_CMD_FIRST 0UL
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UM_KM_COMPAT_CHECK, 0)
+#define PVRSRV_BRIDGE_UMKM_CMD_LAST (0)
+
+#define PVRSRV_BRIDGE_CORE_CMD_FIRST (PVRSRV_BRIDGE_UMKM_CMD_LAST + 1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRV_BRIDGE_CORE_CMD_FIRST+0) /*!< enumerate device bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRV_BRIDGE_CORE_CMD_FIRST+1) /*!< acquire device data bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, PVRSRV_BRIDGE_CORE_CMD_FIRST+2) /*!< release device data bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRV_BRIDGE_CORE_CMD_FIRST+3) /*!< create device addressable memory context */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRV_BRIDGE_CORE_CMD_FIRST+4) /*!< destroy device addressable memory context */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRV_BRIDGE_CORE_CMD_FIRST+5) /*!< get device memory heap info */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRV_BRIDGE_CORE_CMD_FIRST+6) /*!< alloc device memory bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRV_BRIDGE_CORE_CMD_FIRST+7) /*!< free device memory bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRV_BRIDGE_CORE_CMD_FIRST+8) /*!< get free device memory bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, PVRSRV_BRIDGE_CORE_CMD_FIRST+9) /*!< create Cmd Q bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, PVRSRV_BRIDGE_CORE_CMD_FIRST+10) /*!< destroy Cmd Q bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRSRV_BRIDGE_CORE_CMD_FIRST+11) /*!< generate mmap data from a memory handle */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRV_BRIDGE_CORE_CMD_FIRST+12) /*!< services connect bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRV_BRIDGE_CORE_CMD_FIRST+13) /*!< services disconnect bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, PVRSRV_BRIDGE_CORE_CMD_FIRST+14) /*!< wrap device memory bridge index */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, PVRSRV_BRIDGE_CORE_CMD_FIRST+15) /*!< read the kernel meminfo record */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM, PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAP_EXT_MEMORY, PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS, PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
+#define PVRSRV_BRIDGE_MAP_DEV_MEMORY_2 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
+#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
+#if defined (SUPPORT_ION)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAP_ION_HANDLE, PVRSRV_BRIDGE_CORE_CMD_FIRST+29)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAP_ION_HANDLE, PVRSRV_BRIDGE_CORE_CMD_FIRST+30)
+#define PVRSRV_BRIDGE_ION_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+30)
+#else
+#define PVRSRV_BRIDGE_ION_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
+#endif
+#if defined (SUPPORT_DMABUF)
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST (PVRSRV_BRIDGE_ION_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAP_DMABUF, PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAP_DMABUF, PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1)
+#else
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST PVRSRV_BRIDGE_ION_CMD_LAST
+#endif
+#define PVRSRV_BRIDGE_CORE_CMD_LAST PVRSRV_BRIDGE_DMABUF_CMD_LAST
+/* SIM */
+#define PVRSRV_BRIDGE_SIM_CMD_FIRST (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, PVRSRV_BRIDGE_SIM_CMD_FIRST+0) /*!< RTSIM pseudo ISR */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, PVRSRV_BRIDGE_SIM_CMD_FIRST+1) /*!< Register RTSIM process thread */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, PVRSRV_BRIDGE_SIM_CMD_FIRST+2) /*!< Unregister RTSIM process thread */
+#define PVRSRV_BRIDGE_SIM_CMD_LAST (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
+
+/* User Mapping */
+#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0) /*!< map CPU phys to user space */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1) /*!< unmap CPU phys to user space */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2) /*!< get user copy of Phys to Lin lookup table */
+#define PVRSRV_BRIDGE_MAPPING_CMD_LAST (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
+
+#define PVRSRV_BRIDGE_STATS_CMD_FIRST (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_FB_STATS, PVRSRV_BRIDGE_STATS_CMD_FIRST+0) /*!< Get FB memory stats */
+#define PVRSRV_BRIDGE_STATS_CMD_LAST (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
+
+/* API to retrieve misc. info. from services */
+#define PVRSRV_BRIDGE_MISC_CMD_FIRST (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRV_BRIDGE_MISC_CMD_FIRST+0) /*!< misc. info. */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_RELEASE_MISC_INFO, PVRSRV_BRIDGE_MISC_CMD_FIRST+1) /*!< misc. info. */
+#define PVRSRV_BRIDGE_MISC_CMD_LAST (PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
+
+/* Overlay ioctls */
+
+#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
+#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
+#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0) /*!< 3D Overlay rotate blit init */
+#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1) /*!< 3D Overlay rotate blit deinit */
+#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
+#else
+#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST PVRSRV_BRIDGE_MISC_CMD_LAST
+#endif
+
+/* PDUMP */
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST (PVRSRV_BRIDGE_OVERLAY_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_INIT, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_MEMPOL, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_REG, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_REGPOL, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_COMMENT, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_SETFRAME, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_MEMPAGES, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15) /*!< pdump command structure */
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
+#else
+/* Note we are careful here not to leave a large gap in the ioctl numbers.
+ * (Some ports may use these values to index into an array, where large gaps
+ * can waste memory.) */
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST
+#endif
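The note above is worth illustrating: when the bridge indices are used directly as array subscripts, every skipped number still costs a table slot. A minimal sketch under that assumption; the PVRSRVBridgeFn type, the dispatch table and DispatchBridgeCall() below are hypothetical illustrations, not part of the driver (PVRSRV_BRIDGE_LAST_SGX_CMD is defined at the end of this file):

/* Hypothetical dispatch table indexed by bridge command number: every
 * unused index up to PVRSRV_BRIDGE_LAST_SGX_CMD still occupies a slot,
 * which is why the numbering in this file avoids large gaps. */
typedef int (*PVRSRVBridgeFn)(void *pvBridgeIn, void *pvBridgeOut);

static PVRSRVBridgeFn g_apfnBridgeDispatch[PVRSRV_BRIDGE_LAST_SGX_CMD + 1];

static int DispatchBridgeCall(unsigned int uiBridgeID, void *pvIn, void *pvOut)
{
    if (uiBridgeID > PVRSRV_BRIDGE_LAST_SGX_CMD || !g_apfnBridgeDispatch[uiBridgeID])
        return -1; /* unknown or unimplemented bridge command */
    return g_apfnBridgeDispatch[uiBridgeID](pvIn, pvOut);
}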
+
+/* DisplayClass APIs */
+#define PVRSRV_BRIDGE_OEM_CMD_FIRST (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_OEMJTABLE, PVRSRV_BRIDGE_OEM_CMD_FIRST+0) /*!< Get OEM Jtable */
+#define PVRSRV_BRIDGE_OEM_CMD_LAST (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
+
+/* device class enum */
+#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+2)
+
+/* display class API */
+#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2 PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+15)
+
+/* buffer class API */
+#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
+
+/* Wrap/Unwrap external memory */
+#define PVRSRV_BRIDGE_WRAP_CMD_FIRST (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_WRAP_CMD_LAST (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
+
+/* Shared memory */
+#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM, PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
+
+/* Initialisation Service support */
+#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_INITSRV_CONNECT, PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_INITSRV_DISCONNECT, PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
+
+/* Event Objects */
+#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+
+/* Sync ops */
+#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+2)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+3)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+4)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+5)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+6)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+7)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_ALLOC_SYNC_INFO, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+8)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_FREE_SYNC_INFO, PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST (PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+9)
+
+/* For sgx_bridge.h (msvdx_bridge.h should probably use these defines too) */
+#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD (PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)
+
+#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_GETCLIENTINFO , PVRSRV_BRIDGE_SGX_CMD_BASE+0)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO , PVRSRV_BRIDGE_SGX_CMD_BASE+1)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, PVRSRV_BRIDGE_SGX_CMD_BASE+2)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_DOKICK , PVRSRV_BRIDGE_SGX_CMD_BASE+3)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR , PVRSRV_BRIDGE_SGX_CMD_BASE+4)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD , PVRSRV_BRIDGE_SGX_CMD_BASE+5)
+
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE , PVRSRV_BRIDGE_SGX_CMD_BASE+9)
+
+#if defined(TRANSFER_QUEUE)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER , PVRSRV_BRIDGE_SGX_CMD_BASE+13)
+#endif
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_GETMISCINFO , PVRSRV_BRIDGE_SGX_CMD_BASE+14)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , PVRSRV_BRIDGE_SGX_CMD_BASE+15)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_DEVINITPART2 , PVRSRV_BRIDGE_SGX_CMD_BASE+16)
+
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC , PVRSRV_BRIDGE_SGX_CMD_BASE+17)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC , PVRSRV_BRIDGE_SGX_CMD_BASE+18)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC , PVRSRV_BRIDGE_SGX_CMD_BASE+19)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, PVRSRV_BRIDGE_SGX_CMD_BASE+20)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, PVRSRV_BRIDGE_SGX_CMD_BASE+21)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, PVRSRV_BRIDGE_SGX_CMD_BASE+22)
+#if defined(SGX_FEATURE_2D_HARDWARE)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_SUBMIT2D , PVRSRV_BRIDGE_SGX_CMD_BASE+23)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, PVRSRV_BRIDGE_SGX_CMD_BASE+24)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, PVRSRV_BRIDGE_SGX_CMD_BASE+25)
+#endif
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, PVRSRV_BRIDGE_SGX_CMD_BASE+26)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, PVRSRV_BRIDGE_SGX_CMD_BASE+27)
+
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES , PVRSRV_BRIDGE_SGX_CMD_BASE+28)
+
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB , PVRSRV_BRIDGE_SGX_CMD_BASE+29)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY , PVRSRV_BRIDGE_SGX_CMD_BASE+30)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY , PVRSRV_BRIDGE_SGX_CMD_BASE+31)
+
+#if defined(PDUMP)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY , PVRSRV_BRIDGE_SGX_CMD_BASE+32)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, PVRSRV_BRIDGE_SGX_CMD_BASE+33)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, PVRSRV_BRIDGE_SGX_CMD_BASE+34)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, PVRSRV_BRIDGE_SGX_CMD_BASE+35)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB , PVRSRV_BRIDGE_SGX_CMD_BASE+36)
+PVRSRV_IOCTL(PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM , PVRSRV_BRIDGE_SGX_CMD_BASE+37)
+#endif
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_LAST_SGX_CMD if you add/remove a command!
+ * You need to ensure all PVRSRV_BRIDGE_SGX_CMD_BASE+ offsets are sequential!
+ */
+#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+37)
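These bridge indices ultimately travel to the kernel through the driver's ioctl interface. A hedged sketch of what a user-space call could look like when services are reached through DRM; the pvr_bridge_package struct and the PVR_SRVKM_CMD slot below are assumptions for illustration only, not the actual services ABI:

#include "xf86drm.h"

/* Hypothetical bridge package: field names are illustrative only. */
struct pvr_bridge_package {
    unsigned int bridge_id;    /* e.g. PVRSRV_BRIDGE_CONNECT_SERVICES */
    void        *in_buffer;
    unsigned int in_size;
    void        *out_buffer;
    unsigned int out_size;
};

#define PVR_SRVKM_CMD 0  /* assumed driver-specific DRM command index */

static int pvr_bridge_call(int drm_fd, struct pvr_bridge_package *pkg)
{
    /* drmCommandWriteRead() is declared in xf86drm.h, added below. */
    return drmCommandWriteRead(drm_fd, PVR_SRVKM_CMD, pkg, sizeof(*pkg));
}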
diff --git a/src/xf86drm.h b/src/xf86drm.h
new file mode 100644
index 0000000..1bc6e22
--- /dev/null
+++ b/src/xf86drm.h
@@ -0,0 +1,983 @@
+/**
+ * \file xf86drm.h
+ * OS-independent header for DRM user-level library interface.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _XF86DRM_H_
+#define _XF86DRM_H_
+
+#include <stdarg.h>
+#include <sys/types.h>
+#include <stdint.h>
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef DRM_MAX_MINOR
+#define DRM_MAX_MINOR 64 /* deprecated */
+#endif
+
+#if defined(__linux__)
+
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID _IOC_NONE
+#define DRM_IOC_READ _IOC_READ
+#define DRM_IOC_WRITE _IOC_WRITE
+#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#else /* One of the *BSDs */
+
+#include <sys/ioccom.h>
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#endif
+
+ /* Defaults, if nothing set in xf86config */
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+/* Default /dev/dri directory permissions 0755 */
+#define DRM_DEV_DIRMODE \
+ (S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+
+#ifdef __OpenBSD__
+#define DRM_DIR_NAME "/dev"
+#define DRM_PRIMARY_MINOR_NAME "drm"
+#define DRM_CONTROL_MINOR_NAME "drmC" /* deprecated */
+#define DRM_RENDER_MINOR_NAME "drmR"
+#else
+#define DRM_DIR_NAME "/dev/dri"
+#define DRM_PRIMARY_MINOR_NAME "card"
+#define DRM_CONTROL_MINOR_NAME "controlD" /* deprecated */
+#define DRM_RENDER_MINOR_NAME "renderD"
+#define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */
+#endif
+
+#define DRM_DEV_NAME "%s/" DRM_PRIMARY_MINOR_NAME "%d"
+#define DRM_CONTROL_DEV_NAME "%s/" DRM_CONTROL_MINOR_NAME "%d" /* deprecated */
+#define DRM_RENDER_DEV_NAME "%s/" DRM_RENDER_MINOR_NAME "%d"
+
+#define DRM_NODE_NAME_MAX \
+ (sizeof(DRM_DIR_NAME) + 1 /* slash */ \
+ + MAX3(sizeof(DRM_PRIMARY_MINOR_NAME), \
+ sizeof(DRM_CONTROL_MINOR_NAME), \
+ sizeof(DRM_RENDER_MINOR_NAME)) \
+ + sizeof("1048575") /* highest possible node number 2^MINORBITS - 1 */ \
+ + 1) /* NULL-terminator */
+
+#define DRM_ERR_NO_DEVICE (-1001)
+#define DRM_ERR_NO_ACCESS (-1002)
+#define DRM_ERR_NOT_ROOT (-1003)
+#define DRM_ERR_INVALID (-1004)
+#define DRM_ERR_NO_FD (-1005)
+
+#define DRM_AGP_NO_HANDLE 0
+
+typedef unsigned int drmSize, *drmSizePtr; /**< For mapped regions */
+typedef void *drmAddress, **drmAddressPtr; /**< For mapped regions */
+
+#if (__GNUC__ >= 3)
+#define DRM_PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#else
+#define DRM_PRINTFLIKE(f, a)
+#endif
+
+typedef struct _drmServerInfo {
+ int (*debug_print)(const char *format, va_list ap) DRM_PRINTFLIKE(1,0);
+ int (*load_module)(const char *name);
+ void (*get_perms)(gid_t *, mode_t *);
+} drmServerInfo, *drmServerInfoPtr;
+
+typedef struct drmHashEntry {
+ int fd;
+ void (*f)(int, void *, void *);
+ void *tagTable;
+} drmHashEntry;
+
+extern int drmIoctl(int fd, unsigned long request, void *arg);
+extern void *drmGetHashTable(void);
+extern drmHashEntry *drmGetEntry(int fd);
+
+/**
+ * Driver version information.
+ *
+ * \sa drmGetVersion() and drmSetVersion().
+ */
+typedef struct _drmVersion {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ int name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ int date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ int desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+} drmVersion, *drmVersionPtr;
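A minimal usage sketch for this struct, assuming fd is an already open DRM device node (drmGetVersion() and drmFreeVersion() are declared further down in this header):

#include <stdio.h>
#include "xf86drm.h"

static void print_driver_version(int fd)
{
    drmVersionPtr ver = drmGetVersion(fd);   /* queries the kernel driver */
    if (!ver)
        return;
    printf("%s %d.%d.%d (%s)\n", ver->name,
           ver->version_major, ver->version_minor, ver->version_patchlevel,
           ver->date);
    drmFreeVersion(ver);                     /* frees the struct and its strings */
}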
+
+typedef struct _drmStats {
+ unsigned long count; /**< Number of data */
+ struct {
+ unsigned long value; /**< Value from kernel */
+ const char *long_format; /**< Suggested format for long_name */
+ const char *long_name; /**< Long name for value */
+ const char *rate_format; /**< Suggested format for rate_name */
+ const char *rate_name; /**< Short name for value per second */
+ int isvalue; /**< True if value (vs. counter) */
+ const char *mult_names; /**< Multiplier names (e.g., "KGM") */
+ int mult; /**< Multiplier value (e.g., 1024) */
+ int verbose; /**< Suggest only in verbose output */
+ } data[15];
+} drmStatsT;
+
+
+ /* All of these enums *MUST* match with the
+ kernel implementation -- so do *NOT*
+ change them! (The drmlib implementation
+ will just copy the flags instead of
+ translating them.) */
+typedef enum {
+ DRM_FRAME_BUFFER = 0, /**< WC, no caching, no core dump */
+ DRM_REGISTERS = 1, /**< no caching, no core dump */
+ DRM_SHM = 2, /**< shared, cached */
+ DRM_AGP = 3, /**< AGP/GART */
+ DRM_SCATTER_GATHER = 4, /**< PCI scatter/gather */
+ DRM_CONSISTENT = 5 /**< PCI consistent */
+} drmMapType;
+
+typedef enum {
+ DRM_RESTRICTED = 0x0001, /**< Cannot be mapped to client-virtual */
+ DRM_READ_ONLY = 0x0002, /**< Read-only in client-virtual */
+ DRM_LOCKED = 0x0004, /**< Physical pages locked */
+ DRM_KERNEL = 0x0008, /**< Kernel requires access */
+ DRM_WRITE_COMBINING = 0x0010, /**< Use write-combining, if available */
+ DRM_CONTAINS_LOCK = 0x0020, /**< SHM page that contains lock */
+ DRM_REMOVABLE = 0x0040 /**< Removable mapping */
+} drmMapFlags;
+
+/**
+ * \warning These values *MUST* match drm.h
+ */
+typedef enum {
+ /** \name Flags for DMA buffer dispatch */
+ /*@{*/
+ DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note the buffer may not yet have been
+ * processed by the hardware -- getting a
+ * hardware lock with the hardware quiescent
+ * will ensure that the buffer has been
+ * processed.
+ */
+ DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+ /*@}*/
+
+ /** \name Flags for DMA buffer request */
+ /*@{*/
+ DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+ /*@}*/
+} drmDMAFlags;
+
+typedef enum {
+ DRM_PAGE_ALIGN = 0x01,
+ DRM_AGP_BUFFER = 0x02,
+ DRM_SG_BUFFER = 0x04,
+ DRM_FB_BUFFER = 0x08,
+ DRM_PCI_BUFFER_RO = 0x10
+} drmBufDescFlags;
+
+typedef enum {
+ DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+} drmLockFlags;
+
+typedef enum {
+ DRM_CONTEXT_PRESERVED = 0x01, /**< This context is preserved and
+ never swapped. */
+ DRM_CONTEXT_2DONLY = 0x02 /**< This context is for 2D rendering only. */
+} drm_context_tFlags, *drm_context_tFlagsPtr;
+
+typedef struct _drmBufDesc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+} drmBufDesc, *drmBufDescPtr;
+
+typedef struct _drmBufInfo {
+ int count; /**< Number of buffers described in list */
+ drmBufDescPtr list; /**< List of buffer descriptions */
+} drmBufInfo, *drmBufInfoPtr;
+
+typedef struct _drmBuf {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ drmAddress address; /**< Address */
+} drmBuf, *drmBufPtr;
+
+/**
+ * Buffer mapping information.
+ *
+ * Used by drmMapBufs() and drmUnmapBufs() to store information about the
+ * mapped buffers.
+ */
+typedef struct _drmBufMap {
+ int count; /**< Number of buffers mapped */
+ drmBufPtr list; /**< Buffers */
+} drmBufMap, *drmBufMapPtr;
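A short sketch of the legacy buffer-mapping flow these structs describe, assuming the driver actually exposes DMA buffers (drmMapBufs() and drmUnmapBufs() are declared further down):

static long total_dma_bytes(int fd)
{
    drmBufMapPtr bufs = drmMapBufs(fd);   /* maps every DMA buffer at once */
    long total = 0;

    if (!bufs)
        return -1;
    for (int i = 0; i < bufs->count; i++)
        total += bufs->list[i].total;     /* per-buffer size in bytes */
    drmUnmapBufs(bufs);                   /* unmaps the buffers, frees the list */
    return total;
}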
+
+typedef struct _drmLock {
+ volatile unsigned int lock;
+ char padding[60];
+ /* This is big enough for most current (and future?) architectures:
+ DEC Alpha: 32 bytes
+ Intel Merced: ?
+ Intel P5/PPro/PII/PIII: 32 bytes
+ Intel StrongARM: 32 bytes
+ Intel i386/i486: 16 bytes
+ MIPS: 32 bytes (?)
+ Motorola 68k: 16 bytes
+ Motorola PowerPC: 32 bytes
+ Sun SPARC: 32 bytes
+ */
+} drmLock, *drmLockPtr;
+
+/**
+ * Indices here refer to the offset into
+ * list in drmBufInfo
+ */
+typedef struct _drmDMAReq {
+ drm_context_t context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_list; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send, in bytes */
+ drmDMAFlags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size of buffers requested */
+ int *request_list; /**< Buffer information */
+ int *request_sizes; /**< Minimum acceptable sizes */
+ int granted_count; /**< Number of buffers granted at this size */
+} drmDMAReq, *drmDMAReqPtr;
+
+typedef struct _drmRegion {
+ drm_handle_t handle;
+ unsigned int offset;
+ drmSize size;
+ drmAddress map;
+} drmRegion, *drmRegionPtr;
+
+typedef struct _drmTextureRegion {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding; /**< Explicitly pad this out */
+ unsigned int age;
+} drmTextureRegion, *drmTextureRegionPtr;
+
+
+typedef enum {
+ DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ /* bits 1-6 are reserved for high crtcs */
+ DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
+ DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+	DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
+} drmVBlankSeqType;
+#define DRM_VBLANK_HIGH_CRTC_SHIFT 1
+
+typedef struct _drmVBlankReq {
+ drmVBlankSeqType type;
+ unsigned int sequence;
+ unsigned long signal;
+} drmVBlankReq, *drmVBlankReqPtr;
+
+typedef struct _drmVBlankReply {
+ drmVBlankSeqType type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+} drmVBlankReply, *drmVBlankReplyPtr;
+
+typedef union _drmVBlank {
+ drmVBlankReq request;
+ drmVBlankReply reply;
+} drmVBlank, *drmVBlankPtr;
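A minimal sketch of waiting for the next vertical blank with this union; request and reply share storage, so the reply overwrites the request when drmWaitVBlank() (declared further down) returns:

static int wait_one_vblank(int fd)
{
    drmVBlank vbl;

    vbl.request.type     = DRM_VBLANK_RELATIVE; /* relative to the current count */
    vbl.request.sequence = 1;                   /* wait for one vblank */
    vbl.request.signal   = 0;

    if (drmWaitVBlank(fd, &vbl) != 0)
        return -1;
    /* vbl.reply.sequence now holds the vblank count that was reached */
    return (int)vbl.reply.sequence;
}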
+
+typedef struct _drmSetVersion {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+} drmSetVersion, *drmSetVersionPtr;
+
+#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
+
+#define DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+
+#if defined(__GNUC__) && (__GNUC__ >= 2)
+# if defined(__i386) || defined(__AMD64__) || defined(__x86_64__) || defined(__amd64__)
+ /* Reflect changes here to drmP.h */
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ int __dummy; /* Can't mark eax as clobbered */ \
+ __asm__ __volatile__( \
+ "lock ; cmpxchg %4,%1\n\t" \
+ "setnz %0" \
+ : "=d" (__ret), \
+ "=m" (__drm_dummy_lock(lock)), \
+ "=a" (__dummy) \
+ : "2" (old), \
+ "r" (new)); \
+ } while (0)
+
+#elif defined(__alpha__)
+
+#define DRM_CAS(lock, old, new, ret) \
+ do { \
+ int tmp, old32; \
+ __asm__ __volatile__( \
+ " addl $31, %5, %3\n" \
+ "1: ldl_l %0, %2\n" \
+ " cmpeq %0, %3, %1\n" \
+ " beq %1, 2f\n" \
+ " mov %4, %0\n" \
+ " stl_c %0, %2\n" \
+ " beq %0, 3f\n" \
+ " mb\n" \
+ "2: cmpeq %1, 0, %1\n" \
+ ".subsection 2\n" \
+ "3: br 1b\n" \
+ ".previous" \
+ : "=&r"(tmp), "=&r"(ret), \
+ "=m"(__drm_dummy_lock(lock)), \
+ "=&r"(old32) \
+ : "r"(new), "r"(old) \
+ : "memory"); \
+ } while (0)
+
+#elif defined(__sparc__)
+
+#define DRM_CAS(lock,old,new,__ret) \
+do { register unsigned int __old __asm("o0"); \
+ register unsigned int __new __asm("o1"); \
+ register volatile unsigned int *__lock __asm("o2"); \
+ __old = old; \
+ __new = new; \
+ __lock = (volatile unsigned int *)lock; \
+ __asm__ __volatile__( \
+ /*"cas [%2], %3, %0"*/ \
+ ".word 0xd3e29008\n\t" \
+ /*"membar #StoreStore | #StoreLoad"*/ \
+ ".word 0x8143e00a" \
+ : "=&r" (__new) \
+ : "0" (__new), \
+ "r" (__lock), \
+ "r" (__old) \
+ : "memory"); \
+ __ret = (__new != __old); \
+} while(0)
+
+#elif defined(__ia64__)
+
+#ifdef __INTEL_COMPILER
+/* this currently generates bad code (missing stop bits)... */
+#include <ia64intrin.h>
+
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ unsigned long __result, __old = (old) & 0xffffffff; \
+ __mf(); \
+ __result = _InterlockedCompareExchange_acq(&__drm_dummy_lock(lock), (new), __old);\
+ __ret = (__result) != (__old); \
+/* __ret = (__sync_val_compare_and_swap(&__drm_dummy_lock(lock), \
+ (old), (new)) \
+ != (old)); */\
+ } while (0)
+
+#else
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ unsigned int __result, __old = (old); \
+ __asm__ __volatile__( \
+ "mf\n" \
+ "mov ar.ccv=%2\n" \
+ ";;\n" \
+ "cmpxchg4.acq %0=%1,%3,ar.ccv" \
+ : "=r" (__result), "=m" (__drm_dummy_lock(lock)) \
+ : "r" ((unsigned long)__old), "r" (new) \
+ : "memory"); \
+ __ret = (__result) != (__old); \
+ } while (0)
+
+#endif
+
+#elif defined(__powerpc__)
+
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ __asm__ __volatile__( \
+ "sync;" \
+ "0: lwarx %0,0,%1;" \
+ " xor. %0,%3,%0;" \
+ " bne 1f;" \
+ " stwcx. %2,0,%1;" \
+ " bne- 0b;" \
+ "1: " \
+ "sync;" \
+ : "=&r"(__ret) \
+ : "r"(lock), "r"(new), "r"(old) \
+ : "cr0", "memory"); \
+ } while (0)
+
+# elif defined (__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined (__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
+ || defined (__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) \
+ || defined (__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__)
+ /* excluding ARMv4/ARMv5 and lower (lacking ldrex/strex support) */
+ #undef DRM_DEV_MODE
+ #define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)
+
+ #define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ __asm__ __volatile__ ( \
+ "1: ldrex %0, [%1]\n" \
+ " teq %0, %2\n" \
+ " ite eq\n" \
+ " strexeq %0, %3, [%1]\n" \
+ " movne %0, #1\n" \
+ : "=&r" (__ret) \
+ : "r" (lock), "r" (old), "r" (new) \
+ : "cc","memory"); \
+ } while (0)
+
+#endif /* architecture */
+#endif /* __GNUC__ >= 2 */
+
+#ifndef DRM_CAS
+#define DRM_CAS(lock,old,new,ret) do { ret=1; } while (0) /* FAST LOCK FAILS */
+#endif
+
+#if defined(__alpha__)
+#define DRM_CAS_RESULT(_result) long _result
+#elif defined(__powerpc__)
+#define DRM_CAS_RESULT(_result) int _result
+#else
+#define DRM_CAS_RESULT(_result) char _result
+#endif
+
+#define DRM_LIGHT_LOCK(fd,lock,context) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
+ if (__ret) drmGetLock(fd,context,0); \
+ } while(0)
+
+ /* This one counts fast locks -- for
+ benchmarking only. */
+#define DRM_LIGHT_LOCK_COUNT(fd,lock,context,count) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
+ if (__ret) drmGetLock(fd,context,0); \
+ else ++count; \
+ } while(0)
+
+#define DRM_LOCK(fd,lock,context,flags) \
+ do { \
+ if (flags) drmGetLock(fd,context,flags); \
+ else DRM_LIGHT_LOCK(fd,lock,context); \
+ } while(0)
+
+#define DRM_UNLOCK(fd,lock,context) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,DRM_LOCK_HELD|context,context,__ret); \
+ if (__ret) drmUnlock(fd,context); \
+ } while(0)
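A sketch of how the lock/unlock macros pair up in a legacy DRI1-style client, assuming lock points at the shared drmLock (for instance inside a mapped DRM_SHM region flagged DRM_CONTAINS_LOCK) and ctx came from drmCreateContext():

static void with_hardware_lock(int fd, drmLock *lock, drm_context_t ctx)
{
    /* Fast userspace CAS first; falls back to drmGetLock() on contention,
     * exactly as the DRM_LIGHT_LOCK expansion above shows. */
    DRM_LOCK(fd, lock, ctx, 0);

    /* ... touch the hardware or DMA queues while the lock is held ... */

    DRM_UNLOCK(fd, lock, ctx);
}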
+
+ /* Simple spin locks */
+#define DRM_SPINLOCK(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ do { \
+ DRM_CAS(spin,0,val,__ret); \
+ if (__ret) while ((spin)->lock); \
+ } while (__ret); \
+ } while(0)
+
+#define DRM_SPINLOCK_TAKE(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ int cur; \
+ do { \
+ cur = (*spin).lock; \
+ DRM_CAS(spin,cur,val,__ret); \
+ } while (__ret); \
+ } while(0)
+
+#define DRM_SPINLOCK_COUNT(spin,val,count,__ret) \
+ do { \
+ int __i; \
+ __ret = 1; \
+ for (__i = 0; __ret && __i < count; __i++) { \
+ DRM_CAS(spin,0,val,__ret); \
+ if (__ret) for (;__i < count && (spin)->lock; __i++); \
+ } \
+ } while(0)
+
+#define DRM_SPINUNLOCK(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ if ((*spin).lock == val) { /* else server stole lock */ \
+ do { \
+ DRM_CAS(spin,val,0,__ret); \
+ } while (__ret); \
+ } \
+ } while(0)
+
+
+
+/* General user-level programmer's API: unprivileged */
+extern int drmAvailable(void);
+extern int drmOpen(const char *name, const char *busid);
+
+#define DRM_NODE_PRIMARY 0
+#define DRM_NODE_CONTROL 1 /* deprecated: never returned */
+#define DRM_NODE_RENDER 2
+#define DRM_NODE_MAX 3
+
+extern int drmOpenWithType(const char *name, const char *busid,
+ int type);
+
+extern int drmOpenControl(int minor); /* deprecated: always fails */
+extern int drmOpenRender(int minor);
+extern int drmClose(int fd);
+extern drmVersionPtr drmGetVersion(int fd);
+extern drmVersionPtr drmGetLibVersion(int fd);
+extern int drmGetCap(int fd, uint64_t capability, uint64_t *value);
+extern void drmFreeVersion(drmVersionPtr);
+extern int drmGetMagic(int fd, drm_magic_t * magic);
+extern char *drmGetBusid(int fd);
+extern int drmGetInterruptFromBusID(int fd, int busnum, int devnum,
+ int funcnum);
+extern int drmGetMap(int fd, int idx, drm_handle_t *offset,
+ drmSize *size, drmMapType *type,
+ drmMapFlags *flags, drm_handle_t *handle,
+ int *mtrr);
+extern int drmGetClient(int fd, int idx, int *auth, int *pid,
+ int *uid, unsigned long *magic,
+ unsigned long *iocs);
+extern int drmGetStats(int fd, drmStatsT *stats);
+extern int drmSetInterfaceVersion(int fd, drmSetVersion *version);
+extern int drmCommandNone(int fd, unsigned long drmCommandIndex);
+extern int drmCommandRead(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+extern int drmCommandWrite(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+extern int drmCommandWriteRead(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+
+/* General user-level programmer's API: X server (root) only */
+extern void drmFreeBusid(const char *busid);
+extern int drmSetBusid(int fd, const char *busid);
+extern int drmAuthMagic(int fd, drm_magic_t magic);
+extern int drmAddMap(int fd,
+ drm_handle_t offset,
+ drmSize size,
+ drmMapType type,
+ drmMapFlags flags,
+ drm_handle_t * handle);
+extern int drmRmMap(int fd, drm_handle_t handle);
+extern int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t handle);
+
+extern int drmAddBufs(int fd, int count, int size,
+ drmBufDescFlags flags,
+ int agp_offset);
+extern int drmMarkBufs(int fd, double low, double high);
+extern int drmCreateContext(int fd, drm_context_t * handle);
+extern int drmSetContextFlags(int fd, drm_context_t context,
+ drm_context_tFlags flags);
+extern int drmGetContextFlags(int fd, drm_context_t context,
+ drm_context_tFlagsPtr flags);
+extern int drmAddContextTag(int fd, drm_context_t context, void *tag);
+extern int drmDelContextTag(int fd, drm_context_t context);
+extern void *drmGetContextTag(int fd, drm_context_t context);
+extern drm_context_t * drmGetReservedContextList(int fd, int *count);
+extern void drmFreeReservedContextList(drm_context_t *);
+extern int drmSwitchToContext(int fd, drm_context_t context);
+extern int drmDestroyContext(int fd, drm_context_t handle);
+extern int drmCreateDrawable(int fd, drm_drawable_t * handle);
+extern int drmDestroyDrawable(int fd, drm_drawable_t handle);
+extern int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
+ drm_drawable_info_type_t type,
+ unsigned int num, void *data);
+extern int drmCtlInstHandler(int fd, int irq);
+extern int drmCtlUninstHandler(int fd);
+extern int drmSetClientCap(int fd, uint64_t capability,
+ uint64_t value);
+
+extern int drmCrtcGetSequence(int fd, uint32_t crtcId,
+ uint64_t *sequence, uint64_t *ns);
+extern int drmCrtcQueueSequence(int fd, uint32_t crtcId,
+ uint32_t flags, uint64_t sequence,
+ uint64_t *sequence_queued,
+ uint64_t user_data);
+/* General user-level programmer's API: authenticated client and/or X */
+extern int drmMap(int fd,
+ drm_handle_t handle,
+ drmSize size,
+ drmAddressPtr address);
+extern int drmUnmap(drmAddress address, drmSize size);
+extern drmBufInfoPtr drmGetBufInfo(int fd);
+extern drmBufMapPtr drmMapBufs(int fd);
+extern int drmUnmapBufs(drmBufMapPtr bufs);
+extern int drmDMA(int fd, drmDMAReqPtr request);
+extern int drmFreeBufs(int fd, int count, int *list);
+extern int drmGetLock(int fd,
+ drm_context_t context,
+ drmLockFlags flags);
+extern int drmUnlock(int fd, drm_context_t context);
+extern int drmFinish(int fd, int context, drmLockFlags flags);
+extern int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t * handle);
+
+/* AGP/GART support: X server (root) only */
+extern int drmAgpAcquire(int fd);
+extern int drmAgpRelease(int fd);
+extern int drmAgpEnable(int fd, unsigned long mode);
+extern int drmAgpAlloc(int fd, unsigned long size,
+ unsigned long type, unsigned long *address,
+ drm_handle_t *handle);
+extern int drmAgpFree(int fd, drm_handle_t handle);
+extern int drmAgpBind(int fd, drm_handle_t handle,
+ unsigned long offset);
+extern int drmAgpUnbind(int fd, drm_handle_t handle);
+
+/* AGP/GART info: authenticated client and/or X */
+extern int drmAgpVersionMajor(int fd);
+extern int drmAgpVersionMinor(int fd);
+extern unsigned long drmAgpGetMode(int fd);
+extern unsigned long drmAgpBase(int fd); /* Physical location */
+extern unsigned long drmAgpSize(int fd); /* Bytes */
+extern unsigned long drmAgpMemoryUsed(int fd);
+extern unsigned long drmAgpMemoryAvail(int fd);
+extern unsigned int drmAgpVendorId(int fd);
+extern unsigned int drmAgpDeviceId(int fd);
+
+/* PCI scatter/gather support: X server (root) only */
+extern int drmScatterGatherAlloc(int fd, unsigned long size,
+ drm_handle_t *handle);
+extern int drmScatterGatherFree(int fd, drm_handle_t handle);
+
+extern int drmWaitVBlank(int fd, drmVBlankPtr vbl);
+
+/* Support routines */
+extern void drmSetServerInfo(drmServerInfoPtr info);
+extern int drmError(int err, const char *label);
+extern void *drmMalloc(int size);
+extern void drmFree(void *pt);
+
+/* Hash table routines */
+extern void *drmHashCreate(void);
+extern int drmHashDestroy(void *t);
+extern int drmHashLookup(void *t, unsigned long key, void **value);
+extern int drmHashInsert(void *t, unsigned long key, void *value);
+extern int drmHashDelete(void *t, unsigned long key);
+extern int drmHashFirst(void *t, unsigned long *key, void **value);
+extern int drmHashNext(void *t, unsigned long *key, void **value);
+
+/* PRNG routines */
+extern void *drmRandomCreate(unsigned long seed);
+extern int drmRandomDestroy(void *state);
+extern unsigned long drmRandom(void *state);
+extern double drmRandomDouble(void *state);
+
+/* Skip list routines */
+
+extern void *drmSLCreate(void);
+extern int drmSLDestroy(void *l);
+extern int drmSLLookup(void *l, unsigned long key, void **value);
+extern int drmSLInsert(void *l, unsigned long key, void *value);
+extern int drmSLDelete(void *l, unsigned long key);
+extern int drmSLNext(void *l, unsigned long *key, void **value);
+extern int drmSLFirst(void *l, unsigned long *key, void **value);
+extern void drmSLDump(void *l);
+extern int drmSLLookupNeighbors(void *l, unsigned long key,
+ unsigned long *prev_key, void **prev_value,
+ unsigned long *next_key, void **next_value);
+
+extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
+extern int drmOpenOnceWithType(const char *BusID, int *newlyopened, int type);
+extern void drmCloseOnce(int fd);
+extern void drmMsg(const char *format, ...) DRM_PRINTFLIKE(1, 2);
+
+extern int drmSetMaster(int fd);
+extern int drmDropMaster(int fd);
+extern int drmIsMaster(int fd);
+
+#define DRM_EVENT_CONTEXT_VERSION 4
+
+typedef struct _drmEventContext {
+
+ /* This struct is versioned so we can add more pointers if we
+ * add more events. */
+ int version;
+
+ void (*vblank_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
+ void (*page_flip_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
+ void (*page_flip_handler2)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ unsigned int crtc_id,
+ void *user_data);
+
+ void (*sequence_handler)(int fd,
+ uint64_t sequence,
+ uint64_t ns,
+ uint64_t user_data);
+} drmEventContext, *drmEventContextPtr;
+
+extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
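A typical event loop built on this context, sketched under the assumption that a page flip was queued elsewhere with the DRM_MODE_PAGE_FLIP_EVENT flag; on_page_flip() is a caller-supplied callback, not part of this header:

#include <poll.h>

static void on_page_flip(int fd, unsigned int seq,
                         unsigned int sec, unsigned int usec, void *data)
{
    /* invoked from inside drmHandleEvent() once the flip has completed */
    (void)fd; (void)seq; (void)sec; (void)usec; (void)data;
}

static void drain_drm_events(int fd)
{
    drmEventContext evctx = {
        .version = DRM_EVENT_CONTEXT_VERSION,
        .page_flip_handler = on_page_flip,
    };
    struct pollfd pfd = { .fd = fd, .events = POLLIN };

    while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
        drmHandleEvent(fd, &evctx);   /* reads events, invokes the handlers */
}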
+
+extern char *drmGetDeviceNameFromFd(int fd);
+
+/* Improved version of drmGetDeviceNameFromFd which works for any type of
+ * device/node - card or renderD.
+ */
+extern char *drmGetDeviceNameFromFd2(int fd);
+extern int drmGetNodeTypeFromFd(int fd);
+
+/* Convert between GEM handles and DMA-BUF file descriptors.
+ *
+ * Warning: since GEM handles are not reference-counted and are unique per
+ * DRM file description, the caller is expected to perform its own reference
+ * counting. drmPrimeFDToHandle is guaranteed to return the same handle for
+ * different FDs if they reference the same underlying buffer object. This
+ * could even be a buffer object originally created on the same DRM FD.
+ *
+ * When sharing a DRM FD with an API such as EGL or GBM, the caller must not
+ * use drmPrimeHandleToFD nor drmPrimeFDToHandle. A single user-space
+ * reference-counting implementation is necessary to avoid double-closing GEM
+ * handles.
+ *
+ * Two processes can't share the same DRM FD and both use it to create or
+ * import GEM handles, even when using a single user-space reference-counting
+ * implementation like GBM, because GBM doesn't share its state between
+ * processes.
+ */
+extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
+extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
+
+extern int drmCloseBufferHandle(int fd, uint32_t handle);
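A sketch of exporting and re-importing a buffer through PRIME, assuming handle is a GEM handle obtained from some driver-specific allocation (not shown) and that the two file descriptors refer to the same device; DRM_CLOEXEC and DRM_RDWR come from drm.h:

#include <fcntl.h>
#include <unistd.h>

static int share_buffer(int export_fd, int import_fd, uint32_t handle)
{
    int prime_fd;
    uint32_t imported;

    /* export the GEM handle as a dma-buf file descriptor */
    if (drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC | DRM_RDWR, &prime_fd))
        return -1;

    /* import the dma-buf on the other file description */
    if (drmPrimeFDToHandle(import_fd, prime_fd, &imported)) {
        close(prime_fd);
        return -1;
    }

    close(prime_fd);                            /* the GEM handle keeps the buffer alive */
    drmCloseBufferHandle(import_fd, imported);  /* drop it once no longer needed */
    return 0;
}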
+
+extern char *drmGetPrimaryDeviceNameFromFd(int fd);
+extern char *drmGetRenderDeviceNameFromFd(int fd);
+
+#define DRM_BUS_PCI 0
+#define DRM_BUS_USB 1
+#define DRM_BUS_PLATFORM 2
+#define DRM_BUS_HOST1X 3
+
+typedef struct _drmPciBusInfo {
+ uint16_t domain;
+ uint8_t bus;
+ uint8_t dev;
+ uint8_t func;
+} drmPciBusInfo, *drmPciBusInfoPtr;
+
+typedef struct _drmPciDeviceInfo {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subvendor_id;
+ uint16_t subdevice_id;
+ uint8_t revision_id;
+} drmPciDeviceInfo, *drmPciDeviceInfoPtr;
+
+typedef struct _drmUsbBusInfo {
+ uint8_t bus;
+ uint8_t dev;
+} drmUsbBusInfo, *drmUsbBusInfoPtr;
+
+typedef struct _drmUsbDeviceInfo {
+ uint16_t vendor;
+ uint16_t product;
+} drmUsbDeviceInfo, *drmUsbDeviceInfoPtr;
+
+#define DRM_PLATFORM_DEVICE_NAME_LEN 512
+
+typedef struct _drmPlatformBusInfo {
+ char fullname[DRM_PLATFORM_DEVICE_NAME_LEN];
+} drmPlatformBusInfo, *drmPlatformBusInfoPtr;
+
+typedef struct _drmPlatformDeviceInfo {
+ char **compatible; /* NULL terminated list of compatible strings */
+} drmPlatformDeviceInfo, *drmPlatformDeviceInfoPtr;
+
+#define DRM_HOST1X_DEVICE_NAME_LEN 512
+
+typedef struct _drmHost1xBusInfo {
+ char fullname[DRM_HOST1X_DEVICE_NAME_LEN];
+} drmHost1xBusInfo, *drmHost1xBusInfoPtr;
+
+typedef struct _drmHost1xDeviceInfo {
+ char **compatible; /* NULL terminated list of compatible strings */
+} drmHost1xDeviceInfo, *drmHost1xDeviceInfoPtr;
+
+typedef struct _drmDevice {
+ char **nodes; /* DRM_NODE_MAX sized array */
+ int available_nodes; /* DRM_NODE_* bitmask */
+ int bustype;
+ union {
+ drmPciBusInfoPtr pci;
+ drmUsbBusInfoPtr usb;
+ drmPlatformBusInfoPtr platform;
+ drmHost1xBusInfoPtr host1x;
+ } businfo;
+ union {
+ drmPciDeviceInfoPtr pci;
+ drmUsbDeviceInfoPtr usb;
+ drmPlatformDeviceInfoPtr platform;
+ drmHost1xDeviceInfoPtr host1x;
+ } deviceinfo;
+} drmDevice, *drmDevicePtr;
+
+extern int drmGetDevice(int fd, drmDevicePtr *device);
+extern void drmFreeDevice(drmDevicePtr *device);
+
+extern int drmGetDevices(drmDevicePtr devices[], int max_devices);
+extern void drmFreeDevices(drmDevicePtr devices[], int count);
+
+#define DRM_DEVICE_GET_PCI_REVISION (1 << 0)
+extern int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device);
+extern int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices);
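A sketch of device enumeration with the two-call pattern these declarations imply: the first call sizes the array, the second fills it:

#include <stdio.h>
#include <stdlib.h>

static void list_render_nodes(void)
{
    int count = drmGetDevices2(0, NULL, 0);      /* number of DRM devices */
    if (count <= 0)
        return;

    drmDevicePtr *devs = calloc(count, sizeof(*devs));
    if (!devs)
        return;

    count = drmGetDevices2(0, devs, count);      /* fill the array */
    for (int i = 0; i < count; i++) {
        if (devs[i]->available_nodes & (1 << DRM_NODE_RENDER))
            printf("%s\n", devs[i]->nodes[DRM_NODE_RENDER]);
    }
    drmFreeDevices(devs, count);
}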
+
+extern int drmGetDeviceFromDevId(dev_t dev_id, uint32_t flags, drmDevicePtr *device);
+
+/**
+ * Get the node type (DRM_NODE_PRIMARY or DRM_NODE_RENDER) from a device ID.
+ *
+ * Returns negative errno on error.
+ */
+extern int drmGetNodeTypeFromDevId(dev_t devid);
+
+/**
+ * Check if two drmDevice pointers represent the same DRM device.
+ *
+ * Returns 1 if the devices are equal, 0 otherwise.
+ */
+extern int drmDevicesEqual(drmDevicePtr a, drmDevicePtr b);
+
+extern int drmSyncobjCreate(int fd, uint32_t flags, uint32_t *handle);
+extern int drmSyncobjDestroy(int fd, uint32_t handle);
+extern int drmSyncobjHandleToFD(int fd, uint32_t handle, int *obj_fd);
+extern int drmSyncobjFDToHandle(int fd, int obj_fd, uint32_t *handle);
+
+extern int drmSyncobjImportSyncFile(int fd, uint32_t handle, int sync_file_fd);
+extern int drmSyncobjExportSyncFile(int fd, uint32_t handle, int *sync_file_fd);
+extern int drmSyncobjWait(int fd, uint32_t *handles, unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+extern int drmSyncobjReset(int fd, const uint32_t *handles, uint32_t handle_count);
+extern int drmSyncobjSignal(int fd, const uint32_t *handles, uint32_t handle_count);
+extern int drmSyncobjTimelineSignal(int fd, const uint32_t *handles,
+ uint64_t *points, uint32_t handle_count);
+extern int drmSyncobjTimelineWait(int fd, uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+extern int drmSyncobjQuery(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t handle_count);
+extern int drmSyncobjQuery2(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t handle_count, uint32_t flags);
+extern int drmSyncobjTransfer(int fd,
+ uint32_t dst_handle, uint64_t dst_point,
+ uint32_t src_handle, uint64_t src_point,
+ uint32_t flags);
+extern int drmSyncobjEventfd(int fd, uint32_t handle, uint64_t point, int ev_fd,
+ uint32_t flags);
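A small sketch of sharing a sync object between two file descriptions of the same device; DRM_SYNCOBJ_CREATE_SIGNALED comes from drm.h, and the wait timeout is an absolute CLOCK_MONOTONIC value in nanoseconds:

#include <unistd.h>

static int pass_fence(int producer_fd, int consumer_fd)
{
    uint32_t handle, imported, first;
    int obj_fd, ret;

    /* create the object already signaled so the wait below returns at once */
    if (drmSyncobjCreate(producer_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &handle))
        return -1;

    if (drmSyncobjHandleToFD(producer_fd, handle, &obj_fd)) {    /* export */
        drmSyncobjDestroy(producer_fd, handle);
        return -1;
    }
    ret = drmSyncobjFDToHandle(consumer_fd, obj_fd, &imported);  /* import */
    close(obj_fd);                       /* the imported handle is enough */
    if (ret) {
        drmSyncobjDestroy(producer_fd, handle);
        return -1;
    }

    /* absolute timeout of 0: succeeds only because the fence is signaled */
    ret = drmSyncobjWait(consumer_fd, &imported, 1, 0, 0, &first);

    drmSyncobjDestroy(consumer_fd, imported);
    drmSyncobjDestroy(producer_fd, handle);
    return ret;
}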
+
+extern char *
+drmGetFormatModifierVendor(uint64_t modifier);
+
+extern char *
+drmGetFormatModifierName(uint64_t modifier);
+
+extern char *
+drmGetFormatName(uint32_t format);
+
+#ifndef fourcc_mod_get_vendor
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+#endif
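These helpers return newly allocated strings that the caller must free(); a quick sketch, assuming modifier is a 64-bit format modifier obtained elsewhere (for example from a framebuffer or plane query):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

static void describe_modifier(uint64_t modifier)
{
    char *vendor = drmGetFormatModifierVendor(modifier);
    char *name   = drmGetFormatModifierName(modifier);

    printf("vendor 0x%" PRIx64 " (%s), modifier %s\n",
           (uint64_t)fourcc_mod_get_vendor(modifier),
           vendor ? vendor : "unknown",
           name ? name : "unknown");

    free(vendor);   /* both strings are allocated by libdrm */
    free(name);
}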
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif