Index: head/sys/dev/drm/drm.h
===================================================================
--- head/sys/dev/drm/drm.h	(revision 338347)
+++ head/sys/dev/drm/drm.h	(revision 338348)
@@ -1,1148 +1,1159 @@
/**
 * \file drm.h
 * Header for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith
 *
 * \par Acknowledgments:
 * Dec 1999, Richard Henderson, move to generic \c cmpxchg.
 */

/*-
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \mainpage
 *
 * The Direct Rendering Manager (DRM) is a device-independent kernel-level
 * device driver that provides support for the XFree86 Direct Rendering
 * Infrastructure (DRI).
 *
 * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
 * ways:
 *    -# The DRM provides synchronized access to the graphics hardware via
 *       the use of an optimized two-tiered lock.
 *    -# The DRM enforces the DRI security policy for access to the graphics
 *       hardware by only allowing authenticated X11 clients access to
 *       restricted regions of memory.
 *    -# The DRM provides a generic DMA engine, complete with multiple
 *       queues and the ability to detect the need for an OpenGL context
 *       switch.
 *    -# The DRM is extensible via the use of small device-specific modules
 *       that rely extensively on the API exported by the DRM module.
 *
 */

#ifndef _DRM_H_
#define _DRM_H_

#ifndef __user
#define __user
#endif
#ifndef __iomem
#define __iomem
#endif

#ifdef __GNUC__
# define DEPRECATED __attribute__ ((deprecated))
#else
# define DEPRECATED
#endif

#if defined(__linux__)
#include <asm/ioctl.h>		/* For _IO* macros */
#define DRM_IOCTL_NR(n)		_IOC_NR(n)
#define DRM_IOC_VOID		_IOC_NONE
#define DRM_IOC_READ		_IOC_READ
#define DRM_IOC_WRITE		_IOC_WRITE
#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#include <sys/ioccom.h>
#define DRM_IOCTL_NR(n)		((n) & 0xff)
#define DRM_IOC_VOID		IOC_VOID
#define DRM_IOC_READ		IOC_OUT
#define DRM_IOC_WRITE		IOC_IN
#define DRM_IOC_READWRITE	IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif

#ifdef __OpenBSD__
#define DRM_MAJOR	81
#endif
#if defined(__linux__) || defined(__NetBSD__)
#define DRM_MAJOR	226
#endif
#define DRM_MAX_MINOR	15

#define DRM_NAME	"drm"	/**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	/**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	/**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT	10	/**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U	/**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U	/**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

#if defined(__linux__)
typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
typedef unsigned long drm_handle_t;	/**< To mapped regions */
#endif
typedef unsigned int drm_context_t;	/**< GLXContext handle */
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;	/**< Magic for authentication */

/**
 * Cliprect.
 *
 * \warning If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
    unsigned short x1;
    unsigned short y1;
    unsigned short x2;
    unsigned short y2;
};

/**
 * Texture region.
 */
struct drm_tex_region {
    unsigned char next;
    unsigned char prev;
    unsigned char in_use;
    unsigned char padding;
    unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
    __volatile__ unsigned int lock;	/**< lock variable */
    char padding[60];			/**< Pad to cache line */
};
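/*
 * Illustrative sketch (not part of this diff): how a lock word from
 * struct drm_hw_lock above is decoded with the _DRM_LOCK_* macros.  The
 * top two bits carry the held/contended state; the remaining bits hold
 * the context id of the owner.
 */
#if 0	/* example only */
static int
example_lock_owner(const struct drm_hw_lock *hw)
{
    unsigned int lw = hw->lock;

    if (!_DRM_LOCK_IS_HELD(lw))
        return -1;	/* lock is free */
    /* _DRM_LOCK_IS_CONT(lw) would indicate a waiter is queued */
    return _DRM_LOCKING_CONTEXT(lw);	/* context id of the holder */
}
#endif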
/* This is beyond ugly, and only works on GCC.  However, it allows me to use
 * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
 * fix is to use uint32_t instead of size_t, but that fix will break existing
 * LP64 (i.e., PowerPC64, SPARC64, Alpha, etc.) systems.  That *will*
 * eventually happen, though.  I chose 'unsigned long' to be the fallback type
 * because that works on all the platforms I know about.  Hopefully, the
 * real fix will happen before that bites us.
 */
#ifdef __SIZE_TYPE__
# define DRM_SIZE_T __SIZE_TYPE__
#else
# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
# define DRM_SIZE_T unsigned long
#endif

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
    int version_major;		/**< Major version */
    int version_minor;		/**< Minor version */
    int version_patchlevel;	/**< Patch level */
    DRM_SIZE_T name_len;	/**< Length of name buffer */
    char __user *name;		/**< Name of driver */
    DRM_SIZE_T date_len;	/**< Length of date buffer */
    char __user *date;		/**< User-space buffer to hold date */
    DRM_SIZE_T desc_len;	/**< Length of desc buffer */
    char __user *desc;		/**< User-space buffer to hold desc */
};

/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
    DRM_SIZE_T unique_len;	/**< Length of unique */
    char __user *unique;	/**< Unique name for driver instantiation */
};

#undef DRM_SIZE_T
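/*
 * Illustrative userspace sketch (not part of this diff): the kernel side
 * of DRM_IOCTL_VERSION (see drm_version() in drm_drv.c below) truncates
 * each string to the buffer length supplied and reports the full length
 * back, so the ioctl is issued twice -- once with zeroed buffers to learn
 * the lengths, once with buffers sized to fit.  Error handling omitted;
 * DRM_IOCTL_VERSION is defined later in this header.
 */
#if 0	/* example only */
struct drm_version v;

memset(&v, 0, sizeof(v));
ioctl(fd, DRM_IOCTL_VERSION, &v);	/* first pass: lengths only */
v.name = malloc(v.name_len + 1);
v.date = malloc(v.date_len + 1);
v.desc = malloc(v.desc_len + 1);
ioctl(fd, DRM_IOCTL_VERSION, &v);	/* second pass: fetch the strings */
#endif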
struct drm_list {
    int count;			/**< Length of user-space structures */
    struct drm_version __user *version;
};

struct drm_block {
    int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
    enum {
        DRM_ADD_COMMAND,
        DRM_RM_COMMAND,
        DRM_INST_HANDLER,
        DRM_UNINST_HANDLER
    } func;
    int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
    _DRM_FRAME_BUFFER = 0,	/**< WC (no caching), no core dump */
    _DRM_REGISTERS = 1,		/**< no caching, no core dump */
    _DRM_SHM = 2,		/**< shared, cached */
    _DRM_AGP = 3,		/**< AGP/GART */
    _DRM_SCATTER_GATHER = 4,	/**< Scatter/gather memory for PCI DMA */
    _DRM_CONSISTENT = 5,	/**< Consistent memory for PCI DMA */
    _DRM_TTM = 6
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
    _DRM_RESTRICTED = 0x01,	/**< Cannot be mapped to user-virtual */
    _DRM_READ_ONLY = 0x02,
    _DRM_LOCKED = 0x04,		/**< shared, cached, locked */
    _DRM_KERNEL = 0x08,		/**< kernel requires access */
    _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
    _DRM_CONTAINS_LOCK = 0x20,	/**< SHM page that contains lock */
    _DRM_REMOVABLE = 0x40,	/**< Removable mapping */
    _DRM_DRIVER = 0x80		/**< Managed by driver */
};

struct drm_ctx_priv_map {
    unsigned int ctx_id;	/**< Context requesting private mapping */
    void *handle;		/**< Handle of map */
};

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
    unsigned long offset;	/**< Requested physical address (0 for SAREA)*/
    unsigned long size;		/**< Requested physical size (bytes) */
    enum drm_map_type type;	/**< Type of memory to map */
    enum drm_map_flags flags;	/**< Flags */
    void *handle;		/**< User-space: "Handle" to pass to mmap() */
				/**< Kernel-space: kernel-virtual address */
    int mtrr;			/**< MTRR slot used */
    /* Private data */
};

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
    int idx;			/**< Which client desired? */
    int auth;			/**< Is client authenticated? */
    unsigned long pid;		/**< Process ID */
    unsigned long uid;		/**< User ID */
    unsigned long magic;	/**< Magic */
    unsigned long iocs;		/**< Ioctl count */
};

enum drm_stat_type {
    _DRM_STAT_LOCK,
    _DRM_STAT_OPENS,
    _DRM_STAT_CLOSES,
    _DRM_STAT_IOCTLS,
    _DRM_STAT_LOCKS,
    _DRM_STAT_UNLOCKS,
    _DRM_STAT_VALUE,		/**< Generic value */
    _DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
    _DRM_STAT_COUNT,		/**< Generic non-byte counter (1000/k) */
    _DRM_STAT_IRQ,		/**< IRQ */
    _DRM_STAT_PRIMARY,		/**< Primary DMA bytes */
    _DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
    _DRM_STAT_DMA,		/**< DMA */
    _DRM_STAT_SPECIAL,		/**< Special DMA (e.g., priority or polled) */
    _DRM_STAT_MISSED		/**< Missed DMA opportunity */
    /* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
    unsigned long count;
    struct {
        unsigned long value;
        enum drm_stat_type type;
    } data[15];
};

/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
    _DRM_LOCK_READY = 0x01,	/**< Wait until hardware is ready for DMA */
    _DRM_LOCK_QUIESCENT = 0x02,	/**< Wait until hardware quiescent */
    _DRM_LOCK_FLUSH = 0x04,	/**< Flush this context's DMA queue first */
    _DRM_LOCK_FLUSH_ALL = 0x08,	/**< Flush all DMA queues first */
    /* These *HALT* flags aren't supported yet
       -- they will be used to support the
       full-screen DGA-like mode. */
    _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
    _DRM_HALT_CUR_QUEUES = 0x20	 /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
    int context;
    enum drm_lock_flags flags;
};

/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
    /* Flags for DMA buffer dispatch */
    _DRM_DMA_BLOCK = 0x01,	  /**<
				   * Block until buffer dispatched.
				   *
				   * \note The buffer may not yet have
				   * been processed by the hardware --
				   * getting a hardware lock with the
				   * hardware quiescent will ensure
				   * that the buffer has been
				   * processed.
				   */
    _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
    _DRM_DMA_PRIORITY = 0x04,	  /**< High priority dispatch */

    /* Flags for DMA buffer request */
    _DRM_DMA_WAIT = 0x10,	  /**< Wait for free buffers */
    _DRM_DMA_SMALLER_OK = 0x20,	  /**< Smaller-than-requested buffers OK */
    _DRM_DMA_LARGER_OK = 0x40	  /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
    int count;			/**< Number of buffers of this size */
    int size;			/**< Size in bytes */
    int low_mark;		/**< Low water mark */
    int high_mark;		/**< High water mark */
    enum {
        _DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
        _DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
        _DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
        _DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
        _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
    } flags;
    unsigned long agp_start;	/**<
				 * Start address of where the AGP buffers are
				 * in the AGP aperture
				 */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
    int count;			/**< Number of buffers described in list */
    struct drm_buf_desc __user *list; /**< List of buffer descriptions */
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
    int count;
    int __user *list;
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
    int idx;			/**< Index into the master buffer list */
    int total;			/**< Buffer size */
    int used;			/**< Amount of buffer in use (for DMA) */
    void __user *address;	/**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
    int count;			/**< Length of the buffer list */
#if defined(__cplusplus)
    void __user *c_virtual;
#else
    void __user *virtual;	/**< Mmap'd area in user-virtual */
#endif
    struct drm_buf_pub __user *list;	/**< Buffer information */
};

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
    int context;		  /**< Context handle */
    int send_count;		  /**< Number of buffers to send */
    int __user *send_indices;	  /**< List of handles to buffers */
    int __user *send_sizes;	  /**< Lengths of data to send */
    enum drm_dma_flags flags;	  /**< Flags */
    int request_count;		  /**< Number of buffers requested */
    int request_size;		  /**< Desired size for buffers */
    int __user *request_indices;  /**< Buffer information */
    int __user *request_sizes;
    int granted_count;		  /**< Number of buffers granted */
};

enum drm_ctx_flags {
    _DRM_CONTEXT_PRESERVED = 0x01,
    _DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
    drm_context_t handle;
    enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
    int count;
    struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
    drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
    DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
    drm_drawable_t handle;
    unsigned int type;
    unsigned int num;
    unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
    drm_magic_t magic;
};

/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
    int irq;	/**< IRQ number */
    int busnum;	/**< bus number */
    int devnum;	/**< device number */
    int funcnum; /**< function number */
};

enum drm_vblank_seq_type {
    _DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
    _DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
    _DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
    _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
    _DRM_VBLANK_SECONDARY = 0x20000000,	 /**< Secondary display controller */
    _DRM_VBLANK_SIGNAL = 0x40000000	 /**< Send signal instead of blocking */
};

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
				_DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
    enum drm_vblank_seq_type type;
    unsigned int sequence;
    unsigned long signal;
};

struct drm_wait_vblank_reply {
    enum drm_vblank_seq_type type;
    unsigned int sequence;
    long tval_sec;
    long tval_usec;
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
    struct drm_wait_vblank_request request;
    struct drm_wait_vblank_reply reply;
};
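/*
 * Illustrative userspace sketch (not part of this diff): a relative wait
 * for the next vblank using the request/reply union above.  On return the
 * reply half carries the reached sequence number and a timestamp.
 */
#if 0	/* example only */
union drm_wait_vblank vbl;

memset(&vbl, 0, sizeof(vbl));
vbl.request.type = _DRM_VBLANK_RELATIVE;
vbl.request.sequence = 1;		/* one vblank from now */
ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
/* vbl.reply.sequence and vbl.reply.tval_sec/tval_usec describe the event */
#endif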
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
    uint32_t crtc;
    uint32_t cmd;
};

/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
    unsigned long mode;		/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
    unsigned long size;		/**< In bytes -- will round to page boundary */
    unsigned long handle;	/**< Used for binding / unbinding */
    unsigned long type;		/**< Type of memory to allocate */
    unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
    unsigned long handle;	/**< From drm_agp_buffer */
    unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
    int agp_version_major;
    int agp_version_minor;
    unsigned long mode;
    unsigned long aperture_base;   /**< physical address */
    unsigned long aperture_size;   /**< bytes */
    unsigned long memory_allowed;  /**< bytes */
    unsigned long memory_used;

    /** \name PCI information */
    /*@{ */
    unsigned short id_vendor;
    unsigned short id_device;
    /*@} */
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
    unsigned long size;		/**< In bytes -- will round to page boundary */
    unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
    int drm_di_major;
    int drm_di_minor;
    int drm_dd_major;
    int drm_dd_minor;
};

#define DRM_FENCE_FLAG_EMIT		0x00000001
#define DRM_FENCE_FLAG_SHAREABLE	0x00000002
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_FENCE_FLAG_WAIT_LAZY	0x00000004
#define DRM_FENCE_FLAG_NO_USER		0x00000010

/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER		0xFF000000

#define DRM_FENCE_TYPE_EXE		0x00000001

struct drm_fence_arg {
    unsigned int handle;
    unsigned int fence_class;
    unsigned int type;
    unsigned int flags;
    unsigned int signaled;
    unsigned int error;
    unsigned int sequence;
    unsigned int pad64;
    uint64_t expand_pad[2]; /* Future expansion */
};

/* Buffer permissions, referring to how the GPU uses the buffers.
 * These translate to fence types used for the buffers.
 * Typically a texture buffer is read, a destination buffer is write and
 * a command (batch-) buffer is exe.  Can be or-ed together.
 */
#define DRM_BO_FLAG_READ	(1ULL << 0)
#define DRM_BO_FLAG_WRITE	(1ULL << 1)
#define DRM_BO_FLAG_EXE		(1ULL << 2)

/*
 * All of the bits related to access mode
 */
#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)

/*
 * Status flags.  Can be read to determine the actual state of a buffer.
 * Can also be set in the buffer mask before validation.
 */

/*
 * Mask: Never evict this buffer.  Not even with force.  This type of buffer
 * is only available to root and must be manually removed before buffer
 * manager shutdown or lock.
 * Flags: Acknowledge
 */
#define DRM_BO_FLAG_NO_EVICT	(1ULL << 4)

/*
 * Mask: Require that the buffer is placed in mappable memory when validated.
 *       If not set the buffer may or may not be in mappable memory when
 *       validated.
 * Flags: If set, the buffer is in mappable memory.
 */
#define DRM_BO_FLAG_MAPPABLE	(1ULL << 5)

/* Mask: The buffer should be shareable with other processes.
 * Flags: The buffer is shareable with other processes.
 */
#define DRM_BO_FLAG_SHAREABLE	(1ULL << 6)

/* Mask: If set, place the buffer in cache-coherent memory if available.
 *       If clear, never place the buffer in cache coherent memory if
 *       validated.
 * Flags: The buffer is currently in cache-coherent memory.
 */
#define DRM_BO_FLAG_CACHED	(1ULL << 7)

/* Mask: Make sure that every time this buffer is validated, it ends up on
 *       the same location provided that the memory mask is the same.  The
 *       buffer will also not be evicted when claiming space for other
 *       buffers.  Basically a pinned buffer but it may be thrown out as
 *       part of buffer manager shutdown or locking.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_NO_MOVE	(1ULL << 8)

/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
 * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
 * with unsnooped PTEs instead of snooped, by using chipset-specific cache
 * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
 * as the eviction to local memory (TTM unbind) on map is just a side effect
 * to prevent aggressive cache prefetch from the GPU disturbing the cache
 * management that the DRM is doing.
 *
 * Flags: Acknowledge.
 * Buffers allocated with this flag should not be used for suballocators.
 * This type may have issues on CPUs with over-aggressive caching:
 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
 */
#define DRM_BO_FLAG_CACHED_MAPPED	(1ULL << 19)

/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_CACHING	(1ULL << 13)

/*
 * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_MAPPABLE	(1ULL << 14)
#define DRM_BO_FLAG_TILE		(1ULL << 15)

/*
 * Memory type flags that can be or'ed together in the mask, but only
 * one appears in flags.
 */

/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL	(1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT	(1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM	(1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0	(1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1	(1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2	(1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3	(1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4	(1ULL << 31)
/* We can add more of these now with a 64-bit flag type */

/*
 * This is a mask covering all of the memory type flags; easier to just
 * use a single constant than a bunch of | values.  It covers
 * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
 */
#define DRM_BO_MASK_MEM		0x00000000FF000000ULL

/*
 * This adds all of the CPU-mapping options in with the memory
 * type to label all bits which change how the page gets mapped
 */
#define DRM_BO_MASK_MEMTYPE	(DRM_BO_MASK_MEM | \
				 DRM_BO_FLAG_CACHED_MAPPED | \
				 DRM_BO_FLAG_CACHED | \
				 DRM_BO_FLAG_MAPPABLE)

/* Driver-private flags */
#define DRM_BO_MASK_DRIVER	0xFFFF000000000000ULL
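/*
 * Illustrative sketch (not part of this diff) of the mask/flags pairing
 * described above: the mask names every bit the caller cares about (and
 * may OR together several acceptable memory types), while flags carry at
 * most one memory type plus the desired access bits.
 */
#if 0	/* example only */
uint64_t mask = DRM_BO_MASK_ACCESS | DRM_BO_FLAG_MEM_TT |
    DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE;	/* TT or VRAM ok */
uint64_t flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE;		/* chosen placement */
#endif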
/*
 * Don't block on validate and map.  Instead, return EBUSY.
 */
#define DRM_BO_HINT_DONT_BLOCK	0x00000002
/*
 * Don't place this buffer on the unfenced list.  This means
 * that the buffer will not end up having a fence associated
 * with it as a result of this operation.
 */
#define DRM_BO_HINT_DONT_FENCE	0x00000004
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_BO_HINT_WAIT_LAZY	0x00000008
/*
 * The client has computed relocations referring to this buffer using the
 * offset in the presumed_offset field.  If that offset ends up matching
 * where this buffer lands, the kernel is free to skip executing those
 * relocations.
 */
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010

#define DRM_BO_INIT_MAGIC 0xfe769812
#define DRM_BO_INIT_MAJOR 1
#define DRM_BO_INIT_MINOR 0
#define DRM_BO_INIT_PATCH 0

struct drm_bo_info_req {
    uint64_t mask;
    uint64_t flags;
    unsigned int handle;
    unsigned int hint;
    unsigned int fence_class;
    unsigned int desired_tile_stride;
    unsigned int tile_info;
    unsigned int pad64;
    uint64_t presumed_offset;
};

struct drm_bo_create_req {
    uint64_t flags;
    uint64_t size;
    uint64_t buffer_start;
    unsigned int hint;
    unsigned int page_alignment;
};

/*
 * Reply flags
 */
#define DRM_BO_REP_BUSY 0x00000001

struct drm_bo_info_rep {
    uint64_t flags;
    uint64_t proposed_flags;
    uint64_t size;
    uint64_t offset;
    uint64_t arg_handle;
    uint64_t buffer_start;
    unsigned int handle;
    unsigned int fence_flags;
    unsigned int rep_flags;
    unsigned int page_alignment;
    unsigned int desired_tile_stride;
    unsigned int hw_tile_stride;
    unsigned int tile_info;
    unsigned int pad64;
    uint64_t expand_pad[4]; /* Future expansion */
};

struct drm_bo_arg_rep {
    struct drm_bo_info_rep bo_info;
    int ret;
    unsigned int pad64;
};

struct drm_bo_create_arg {
    union {
        struct drm_bo_create_req req;
        struct drm_bo_info_rep rep;
    } d;
};

struct drm_bo_handle_arg {
    unsigned int handle;
};

struct drm_bo_reference_info_arg {
    union {
        struct drm_bo_handle_arg req;
        struct drm_bo_info_rep rep;
    } d;
};

struct drm_bo_map_wait_idle_arg {
    union {
        struct drm_bo_info_req req;
        struct drm_bo_info_rep rep;
    } d;
};

struct drm_bo_op_req {
    enum {
        drm_bo_validate,
        drm_bo_fence,
        drm_bo_ref_fence,
    } op;
    unsigned int arg_handle;
    struct drm_bo_info_req bo_req;
};

struct drm_bo_op_arg {
    uint64_t next;
    union {
        struct drm_bo_op_req req;
        struct drm_bo_arg_rep rep;
    } d;
    int handled;
    unsigned int pad64;
};

#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7
#define DRM_BO_MEM_TYPES 8 /* For now. */

#define DRM_BO_LOCK_UNLOCK_BM		(1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT	(1 << 1)

struct drm_bo_version_arg {
    uint32_t major;
    uint32_t minor;
    uint32_t patchlevel;
};

struct drm_mm_type_arg {
    unsigned int mem_type;
    unsigned int lock_flags;
};

struct drm_mm_init_arg {
    unsigned int magic;
    unsigned int major;
    unsigned int minor;
    unsigned int mem_type;
    uint64_t p_offset;
    uint64_t p_size;
};

struct drm_mm_info_arg {
    unsigned int mem_type;
    uint64_t p_size;
};

struct drm_gem_close {
    /** Handle of the object to be closed. */
    uint32_t handle;
    uint32_t pad;
};

struct drm_gem_flink {
    /** Handle for the object being named */
    uint32_t handle;

    /** Returned global name */
    uint32_t name;
};

struct drm_gem_open {
    /** Name of object being opened */
    uint32_t name;

    /** Returned handle for the object */
    uint32_t handle;

    /** Returned size of the object */
    uint64_t size;
};
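/*
 * Illustrative userspace sketch (not part of this diff): publishing a GEM
 * handle as a global name with FLINK and importing it elsewhere with OPEN,
 * using the two structures above (ioctl numbers are defined just below).
 */
#if 0	/* example only */
struct drm_gem_flink flink = { .handle = handle };
ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);		/* flink.name is now global */

struct drm_gem_open op = { .name = flink.name };
ioctl(other_fd, DRM_IOCTL_GEM_OPEN, &op);	/* fills op.handle, op.size */
#endif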
/**
 * \name Ioctls Definitions
 */
/*@{*/

#define DRM_IOCTL_BASE		'd'
#define DRM_IO(nr)		_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)	_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)	_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)	_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION	DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE	DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC	DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID	DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP	DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT	DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS	DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION	DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL	DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE	DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK	DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN	DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE	DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC	DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK		DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK	DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL	DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP	DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS	DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS	DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS	DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS	DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, struct drm_buf_free)
#define DRM_IOCTL_RM_MAP	DRM_IOW( 0x1b, struct drm_map)
#define DRM_IOCTL_SET_SAREA_CTX	DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX	DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_ADD_CTX	DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX	DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX	DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX	DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX	DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX	DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX	DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW	DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW	DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA		DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK		DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK	DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH	DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_AGP_ACQUIRE	DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE	DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE	DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO	DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC	DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE	DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND	DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND	DRM_IOW( 0x37, struct drm_agp_binding)
#define DRM_IOCTL_SG_ALLOC	DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE	DRM_IOW( 0x39, struct drm_scatter_gather)
#define DRM_IOCTL_WAIT_VBLANK	DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW	DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MM_INIT		DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN		DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK		DRM_IOWR(0xc2, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_UNLOCK		DRM_IOWR(0xc3, struct drm_mm_type_arg)

#define DRM_IOCTL_FENCE_CREATE		DRM_IOWR(0xc4, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_REFERENCE	DRM_IOWR(0xc6, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_UNREFERENCE	DRM_IOWR(0xc7, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_SIGNALED	DRM_IOWR(0xc8, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_FLUSH		DRM_IOWR(0xc9, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_WAIT		DRM_IOWR(0xca, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_EMIT		DRM_IOWR(0xcb, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_BUFFERS		DRM_IOWR(0xcc, struct drm_fence_arg)

#define DRM_IOCTL_BO_CREATE		DRM_IOWR(0xcd, struct drm_bo_create_arg)
#define DRM_IOCTL_BO_MAP		DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_UNMAP		DRM_IOWR(0xd0, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_REFERENCE		DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_UNREFERENCE	DRM_IOWR(0xd2, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_SETSTATUS		DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_INFO		DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE		DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_VERSION		DRM_IOR(0xd6, struct drm_bo_version_arg)
#define DRM_IOCTL_MM_INFO		DRM_IOWR(0xd7, struct drm_mm_info_arg)

/*@}*/

/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE		0x40
#define DRM_COMMAND_END			0xA0
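/*
 * Illustrative sketch (not part of this diff): a driver defines its
 * private ioctls relative to DRM_COMMAND_BASE in its own header.  The
 * names below are hypothetical, for a made-up driver "foo":
 */
#if 0	/* example only */
#define DRM_FOO_GETPARAM	0x00	/* driver-relative ioctl number */
#define DRM_IOCTL_FOO_GETPARAM	\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_GETPARAM, struct drm_foo_getparam)
#endif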
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
typedef struct drm_fence_arg drm_fence_arg_t;
typedef struct drm_mm_type_arg drm_mm_type_arg_t;
typedef struct drm_mm_init_arg drm_mm_init_arg_t;
typedef enum drm_bo_type drm_bo_type_t;
#endif

+#define DRM_PORT "graphics/drm-legacy-kmod"
+
+#define DRM_OBSOLETE(dev)						\
+	do {								\
+		device_printf(dev, "=======================================================\n"); \
+		device_printf(dev, "This code is obsolete abandonware. Install the " DRM_PORT " pkg\n"); \
+		device_printf(dev, "=======================================================\n"); \
+		gone_in_dev(dev, 13, "drm drivers");			\
+	} while (0)
+
+
#endif
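/*
 * Illustrative note (not part of this diff): with DRM_OBSOLETE() in place,
 * probing a legacy device should log something like the following (device
 * name hypothetical), and gone_in_dev() additionally warns that the driver
 * is scheduled for removal in FreeBSD 13:
 *
 *   drm0: =======================================================
 *   drm0: This code is obsolete abandonware. Install the graphics/drm-legacy-kmod pkg
 *   drm0: =======================================================
 */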
Index: head/sys/dev/drm/drm_drv.c
===================================================================
--- head/sys/dev/drm/drm_drv.c	(revision 338347)
+++ head/sys/dev/drm/drm_drv.c	(revision 338348)
@@ -1,833 +1,834 @@
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith
 *    Gareth Hughes
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif

static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);

MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, mem, 1, 1, 1);

static drm_ioctl_desc_t drm_ioctls[256] = {
    DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
    DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
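/*
 * Note (not part of this diff): DRM_IOCTL_DEF comes from drmP.h; assuming
 * it keeps its usual form there,
 *
 *	#define DRM_IOCTL_DEF(ioctl, func, flags) \
 *		[DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
 *
 * each entry above is a C99 designated initializer placed at the slot
 * matching its ioctl number, which is why drm_ioctl() below can index
 * drm_ioctls[] directly with DRM_IOCTL_NR(cmd).
 */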
static struct cdevsw drm_cdevsw = {
    .d_version =	D_VERSION,
    .d_open =		drm_open,
    .d_read =		drm_read,
    .d_ioctl =		drm_ioctl,
    .d_poll =		drm_poll,
    .d_mmap =		drm_mmap,
    .d_name =		"drm",
    .d_flags =		D_TRACKCLOSE
};

static int drm_msi = 1;	/* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
    {0x8086, 0x2772}, /* Intel i945G	*/ \
    {0x8086, 0x27A2}, /* Intel i945GM	*/ \
    {0x8086, 0x27AE}, /* Intel i945GME	*/ \
    {0, 0}
};

static int drm_msi_is_blacklisted(int vendor, int device)
{
    int i = 0;

    for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
        if ((drm_msi_blacklist[i].vendor == vendor) &&
            (drm_msi_blacklist[i].device == device)) {
            return 1;
        }
    }

    return 0;
}

int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
    drm_pci_id_list_t *id_entry;
    int vendor, device;

    vendor = pci_get_vendor(kdev);
    device = pci_get_device(kdev);

    if (pci_get_class(kdev) != PCIC_DISPLAY
        || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
        return ENXIO;

    id_entry = drm_find_description(vendor, device, idlist);
    if (id_entry != NULL) {
        if (!device_get_desc(kdev)) {
            DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
            device_set_desc(kdev, id_entry->name);
        }
-        return 0;
+        DRM_OBSOLETE(kdev);
+        return BUS_PROBE_GENERIC;
    }

    return ENXIO;
}
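/*
 * Context for the change above (not part of this diff): a newbus probe
 * method returns a priority, not just success.  From sys/sys/bus.h,
 * 0 (BUS_PROBE_SPECIFIC) is the strongest claim and more negative values
 * are weaker:
 *
 *	#define BUS_PROBE_SPECIFIC	0
 *	#define BUS_PROBE_DEFAULT	(-20)
 *	#define BUS_PROBE_GENERIC	(-100)
 *
 * Returning BUS_PROBE_GENERIC instead of 0 lets any other DRM driver that
 * bids with a higher priority win the device, so this obsolete driver only
 * attaches when nothing better claims the hardware.
 */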
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
    struct drm_device *dev;
    drm_pci_id_list_t *id_entry;
    int unit, msicount;

    unit = device_get_unit(kdev);
    dev = device_get_softc(kdev);

    dev->device = kdev;
    dev->devnode = make_dev(&drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
        DRM_DEV_MODE, "dri/card%d", unit);
    dev->devnode->si_drv1 = dev;

    dev->pci_domain = pci_get_domain(dev->device);
    dev->pci_bus = pci_get_bus(dev->device);
    dev->pci_slot = pci_get_slot(dev->device);
    dev->pci_func = pci_get_function(dev->device);

    dev->pci_vendor = pci_get_vendor(dev->device);
    dev->pci_device = pci_get_device(dev->device);

    if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
        if (drm_msi &&
            !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
            msicount = pci_msi_count(dev->device);
            DRM_DEBUG("MSI count = %d\n", msicount);
            if (msicount > 1)
                msicount = 1;

            if (pci_alloc_msi(dev->device, &msicount) == 0) {
                DRM_INFO("MSI enabled %d message(s)\n", msicount);
                dev->msi_enabled = 1;
                dev->irqrid = 1;
            }
        }

        dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
            &dev->irqrid, RF_SHAREABLE);
        if (!dev->irqr) {
            return ENOENT;
        }

        dev->irq = (int) rman_get_start(dev->irqr);
    }

    mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
    mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
    mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
    mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);

    id_entry = drm_find_description(dev->pci_vendor,
        dev->pci_device, idlist);
    dev->id_entry = id_entry;

    return drm_load(dev);
}

int drm_detach(device_t kdev)
{
    struct drm_device *dev;

    dev = device_get_softc(kdev);

    drm_unload(dev);

    if (dev->irqr) {
        bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
            dev->irqr);

        if (dev->msi_enabled) {
            pci_release_msi(dev->device);
            DRM_INFO("MSI released\n");
        }
    }

    return 0;
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
    int i = 0;

    for (i = 0; idlist[i].vendor != 0; i++) {
        if ((idlist[i].vendor == vendor) &&
            ((idlist[i].device == device) ||
            (idlist[i].device == 0))) {
            return &idlist[i];
        }
    }
    return NULL;
}

static int drm_firstopen(struct drm_device *dev)
{
    drm_local_map_t *map;
    int i;

    DRM_SPINLOCK_ASSERT(&dev->dev_lock);

    /* prebuild the SAREA */
    i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
        _DRM_CONTAINS_LOCK, &map);
    if (i != 0)
        return i;

    if (dev->driver->firstopen)
        dev->driver->firstopen(dev);

    dev->buf_use = 0;

    if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
        i = drm_dma_setup(dev);
        if (i != 0)
            return i;
    }

    for (i = 0; i < DRM_HASH_SIZE; i++) {
        dev->magiclist[i].head = NULL;
        dev->magiclist[i].tail = NULL;
    }

    dev->lock.lock_queue = 0;
    dev->irq_enabled = 0;
    dev->context_flag = 0;
    dev->last_context = 0;
    dev->if_version = 0;

    dev->buf_sigio = NULL;

    DRM_DEBUG("\n");

    return 0;
}

static int drm_lastclose(struct drm_device *dev)
{
    drm_magic_entry_t *pt, *next;
    drm_local_map_t *map, *mapsave;
    int i;

    DRM_SPINLOCK_ASSERT(&dev->dev_lock);

    DRM_DEBUG("\n");

    if (dev->driver->lastclose != NULL)
        dev->driver->lastclose(dev);

    if (dev->irq_enabled)
        drm_irq_uninstall(dev);

    if (dev->unique) {
        free(dev->unique, DRM_MEM_DRIVER);
        dev->unique = NULL;
        dev->unique_len = 0;
    }
    /* Clear pid list */
    for (i = 0; i < DRM_HASH_SIZE; i++) {
        for (pt = dev->magiclist[i].head; pt; pt = next) {
            next = pt->next;
            free(pt, DRM_MEM_MAGIC);
        }
        dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
    }

    DRM_UNLOCK();
    drm_drawable_free_all(dev);
    DRM_LOCK();

    /* Clear AGP information */
    if (dev->agp) {
        drm_agp_mem_t *entry;
        drm_agp_mem_t *nexte;

        /* Remove AGP resources, but leave dev->agp intact until
         * drm_unload is called.
         */
        for (entry = dev->agp->memory; entry; entry = nexte) {
            nexte = entry->next;
            if (entry->bound)
                drm_agp_unbind_memory(entry->handle);
            drm_agp_free_memory(entry->handle);
            free(entry, DRM_MEM_AGPLISTS);
        }
        dev->agp->memory = NULL;

        if (dev->agp->acquired)
            drm_agp_release(dev);

        dev->agp->acquired = 0;
        dev->agp->enabled = 0;
    }
    if (dev->sg != NULL) {
        drm_sg_cleanup(dev->sg);
        dev->sg = NULL;
    }

    TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
        if (!(map->flags & _DRM_DRIVER))
            drm_rmmap(dev, map);
    }

    drm_dma_takedown(dev);
    if (dev->lock.hw_lock) {
        dev->lock.hw_lock = NULL; /* SHM removed */
        dev->lock.file_priv = NULL;
        DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
    }

    return 0;
}

static int drm_load(struct drm_device *dev)
{
    int i, retcode;

    DRM_DEBUG("\n");

    TAILQ_INIT(&dev->maplist);
    dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
    if (dev->map_unrhdr == NULL) {
        DRM_ERROR("Couldn't allocate map number allocator\n");
        return EINVAL;
    }

    drm_mem_init();
    drm_sysctl_init(dev);
    TAILQ_INIT(&dev->files);

    dev->counters = 6;
    dev->types[0] = _DRM_STAT_LOCK;
    dev->types[1] = _DRM_STAT_OPENS;
    dev->types[2] = _DRM_STAT_CLOSES;
    dev->types[3] = _DRM_STAT_IOCTLS;
    dev->types[4] = _DRM_STAT_LOCKS;
    dev->types[5] = _DRM_STAT_UNLOCKS;

    for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
        atomic_set(&dev->counts[i], 0);

    if (dev->driver->load != NULL) {
        DRM_LOCK();
        /* Shared code returns -errno. */
        retcode = -dev->driver->load(dev,
            dev->id_entry->driver_private);
        if (pci_enable_busmaster(dev->device))
            DRM_ERROR("Request to enable bus-master failed.\n");
        DRM_UNLOCK();
        if (retcode != 0)
            goto error;
    }

    if (drm_core_has_AGP(dev)) {
        if (drm_device_is_agp(dev))
            dev->agp = drm_agp_init();
        if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
            dev->agp == NULL) {
            DRM_ERROR("Card isn't AGP, or couldn't initialize "
                "AGP.\n");
            retcode = ENOMEM;
            goto error;
        }
        if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
            if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
                dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
                dev->agp->mtrr = 1;
        }
    }

    retcode = drm_ctxbitmap_init(dev);
    if (retcode != 0) {
        DRM_ERROR("Cannot allocate memory for context bitmap.\n");
        goto error;
    }

    dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
    if (dev->drw_unrhdr == NULL) {
        DRM_ERROR("Couldn't allocate drawable number allocator\n");
        goto error;
    }

    DRM_INFO("Initialized %s %d.%d.%d %s\n",
        dev->driver->name,
        dev->driver->major,
        dev->driver->minor,
        dev->driver->patchlevel,
        dev->driver->date);

    return 0;

error:
    drm_sysctl_cleanup(dev);
    DRM_LOCK();
    drm_lastclose(dev);
    DRM_UNLOCK();
    destroy_dev(dev->devnode);

    mtx_destroy(&dev->drw_lock);
    mtx_destroy(&dev->vbl_lock);
    mtx_destroy(&dev->irq_lock);
    mtx_destroy(&dev->dev_lock);

    return retcode;
}

static void drm_unload(struct drm_device *dev)
{
    int i;

    DRM_DEBUG("\n");

    drm_sysctl_cleanup(dev);
    destroy_dev(dev->devnode);

    drm_ctxbitmap_cleanup(dev);

    if (dev->agp && dev->agp->mtrr) {
        int __unused retcode;

        retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
            dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
        DRM_DEBUG("mtrr_del = %d", retcode);
    }

    drm_vblank_cleanup(dev);

    DRM_LOCK();
    drm_lastclose(dev);
    DRM_UNLOCK();

    /* Clean up PCI resources allocated by drm_bufs.c.  We're not really
     * worried about resource consumption while the DRM is inactive
     * (between lastclose and firstopen or unload) because these aren't
     * actually taking up KVA, just keeping the PCI resource allocated.
     */
    for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
        if (dev->pcir[i] == NULL)
            continue;
        bus_release_resource(dev->device, SYS_RES_MEMORY,
            dev->pcirid[i], dev->pcir[i]);
        dev->pcir[i] = NULL;
    }

    if (dev->agp) {
        free(dev->agp, DRM_MEM_AGPLISTS);
        dev->agp = NULL;
    }

    if (dev->driver->unload != NULL) {
        DRM_LOCK();
        dev->driver->unload(dev);
        DRM_UNLOCK();
    }

    delete_unrhdr(dev->drw_unrhdr);
    delete_unrhdr(dev->map_unrhdr);

    drm_mem_uninit();

    if (pci_disable_busmaster(dev->device))
        DRM_ERROR("Request to disable bus-master failed.\n");

    mtx_destroy(&dev->drw_lock);
    mtx_destroy(&dev->vbl_lock);
    mtx_destroy(&dev->irq_lock);
    mtx_destroy(&dev->dev_lock);
}

int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
    struct drm_version *version = data;
    int len;

#define DRM_COPY( name, value )					\
    len = strlen( value );					\
    if ( len > name##_len ) len = name##_len;			\
    name##_len = strlen( value );				\
    if ( len && name ) {					\
        if ( DRM_COPY_TO_USER( name, value, len ) )		\
            return EFAULT;					\
    }

    version->version_major = dev->driver->major;
    version->version_minor = dev->driver->minor;
    version->version_patchlevel = dev->driver->patchlevel;

    DRM_COPY(version->name, dev->driver->name);
    DRM_COPY(version->date, dev->driver->date);
    DRM_COPY(version->desc, dev->driver->desc);

    return 0;
}

int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
    struct drm_device *dev = NULL;
    int retcode = 0;

    dev = kdev->si_drv1;

    DRM_DEBUG("open_count = %d\n", dev->open_count);

    retcode = drm_open_helper(kdev, flags, fmt, p, dev);

    if (!retcode) {
        atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
        DRM_LOCK();
        device_busy(dev->device);
        if (!dev->open_count++)
            retcode = drm_firstopen(dev);
        DRM_UNLOCK();
    }

    return retcode;
}

void drm_close(void *data)
{
    struct drm_file *file_priv = data;
    struct drm_device *dev = file_priv->dev;
    int retcode = 0;

    DRM_DEBUG("open_count = %d\n", dev->open_count);

    DRM_LOCK();

    if (dev->driver->preclose != NULL)
        dev->driver->preclose(dev, file_priv);

    /* ========================================================
     * Begin inline drm_release
     */

    DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
        DRM_CURRENTPID, (long)dev->device, dev->open_count);

    if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
        && dev->lock.file_priv == file_priv) {
        DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
            DRM_CURRENTPID,
            _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
        if (dev->driver->reclaim_buffers_locked != NULL)
            dev->driver->reclaim_buffers_locked(dev, file_priv);

        drm_lock_free(&dev->lock,
            _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

        /* FIXME: may require heavy-handed reset of hardware at
           this point, possibly processed via a callback to the
           X server. */
    } else if (dev->driver->reclaim_buffers_locked != NULL &&
        dev->lock.hw_lock != NULL) {
        /* The lock is required to reclaim buffers */
        for (;;) {
            if (!dev->lock.hw_lock) {
                /* Device has been unregistered */
                retcode = EINTR;
                break;
            }
            if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
                dev->lock.file_priv = file_priv;
                dev->lock.lock_time = jiffies;
                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                break;	/* Got lock */
            }
            /* Contention */
            retcode = mtx_sleep((void *)&dev->lock.lock_queue,
                &dev->dev_lock, PCATCH, "drmlk2", 0);
            if (retcode)
                break;
        }
        if (retcode == 0) {
            dev->driver->reclaim_buffers_locked(dev, file_priv);
            drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
        }
    }

    if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
        !dev->driver->reclaim_buffers_locked)
        drm_reclaim_buffers(dev, file_priv);

    funsetown(&dev->buf_sigio);

    if (dev->driver->postclose != NULL)
        dev->driver->postclose(dev, file_priv);
    TAILQ_REMOVE(&dev->files, file_priv, link);
    free(file_priv, DRM_MEM_FILES);

    /* ========================================================
     * End inline drm_release
     */

    atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
    device_unbusy(dev->device);
    if (--dev->open_count == 0) {
        retcode = drm_lastclose(dev);
    }

    DRM_UNLOCK();
}

/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
    DRM_STRUCTPROC *p)
{
    struct drm_device *dev = drm_get_device_from_kdev(kdev);
    int retcode = 0;
    drm_ioctl_desc_t *ioctl;
    int (*func)(struct drm_device *dev, void *data,
        struct drm_file *file_priv);
    int nr = DRM_IOCTL_NR(cmd);
    int is_driver_ioctl = 0;
    struct drm_file *file_priv;

    retcode = devfs_get_cdevpriv((void **)&file_priv);
    if (retcode != 0) {
        DRM_ERROR("can't find authenticator\n");
        return EINVAL;
    }

    atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
    ++file_priv->ioctl_count;

    DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
        DRM_CURRENTPID, cmd, nr, (long)dev->device,
        file_priv->authenticated);

    switch (cmd) {
    case FIONBIO:
    case FIOASYNC:
        return 0;

    case FIOSETOWN:
        return fsetown(*(int *)data, &dev->buf_sigio);

    case FIOGETOWN:
        *(int *) data = fgetown(&dev->buf_sigio);
        return 0;
    }

    if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
        DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
        return EINVAL;
    }

    ioctl = &drm_ioctls[nr];
    /* It's not a core DRM ioctl, try driver-specific. */
    if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
        /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
        nr -= DRM_COMMAND_BASE;
        if (nr > dev->driver->max_ioctl) {
            DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
                nr, dev->driver->max_ioctl);
            return EINVAL;
        }
        ioctl = &dev->driver->ioctls[nr];
        is_driver_ioctl = 1;
    }

    func = ioctl->func;

    if (func == NULL) {
        DRM_DEBUG("no function\n");
        return EINVAL;
    }

    if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
        ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
        ((ioctl->flags & DRM_MASTER) && !file_priv->master))
        return EACCES;

    if (is_driver_ioctl) {
        DRM_LOCK();
        /* shared code returns -errno */
        retcode = -func(dev, data, file_priv);
        DRM_UNLOCK();
    } else {
        retcode = func(dev, data, file_priv);
    }

    if (retcode != 0)
        DRM_DEBUG("    returning %d\n", retcode);

    return retcode;
}

drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
    drm_local_map_t *map;

    DRM_SPINLOCK_ASSERT(&dev->dev_lock);
    TAILQ_FOREACH(map, &dev->maplist, link) {
        if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
            return map;
    }

    return NULL;
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN	0x6400
#define LINUX_IOCTL_DRM_MAX	0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_unregister_handler, &drm_handler);

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
    int error;
    int cmd = args->cmd;

    args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
    if (cmd & LINUX_IOC_IN)
        args->cmd |= IOC_IN;
    if (cmd & LINUX_IOC_OUT)
        args->cmd |= IOC_OUT;

    error = ioctl(p, (struct ioctl_args *)args);

    return error;
}
#endif /* DRM_LINUX */

Index: head/sys/dev/drm2/drm_os_freebsd.c
===================================================================
--- head/sys/dev/drm2/drm_os_freebsd.c	(revision 338347)
+++ head/sys/dev/drm2/drm_os_freebsd.c	(revision 338348)
@@ -1,499 +1,500 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>

#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>

devclass_t drm_devclass;

MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap", "DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");

const char *fb_mode_option = NULL;

#define NSEC_PER_USEC	1000L
#define NSEC_PER_SEC	1000000000L

int64_t
timeval_to_ns(const struct timeval *tv)
{
    return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
        tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
    struct timeval tv;
    long rem;

    if (nsec == 0) {
        tv.tv_sec = 0;
        tv.tv_usec = 0;
        return (tv);
    }

    tv.tv_sec = nsec / NSEC_PER_SEC;
    rem = nsec % NSEC_PER_SEC;
    if (rem < 0) {
        tv.tv_sec--;
        rem += NSEC_PER_SEC;
    }
    tv.tv_usec = rem / 1000;
    return (tv);
}
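/*
 * Worked example (not part of this diff) of the negative-remainder fixup
 * above: for nsec = -1500, truncating division gives tv_sec = 0 and
 * rem = -1500; the fixup yields tv_sec = -1 and rem = 999998500 ns
 * (-1 s + 0.9999985 s = -1.5 us exactly), and tv_usec = rem / 1000 =
 * 999998, dropping only the sub-microsecond 500 ns.
 */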
*/ int drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist, struct drm_driver *driver) { struct drm_device *dev; int vendor, device; int ret; dev = device_get_softc(kdev); vendor = pci_get_vendor(kdev); device = pci_get_device(kdev); dev->id_entry = drm_find_description(vendor, device, idlist); ret = drm_get_pci_dev(kdev, dev, driver); return (ret); } int drm_generic_suspend(device_t kdev) { struct drm_device *dev; int error; DRM_DEBUG_KMS("Starting suspend\n"); dev = device_get_softc(kdev); if (dev->driver->suspend) { pm_message_t state; state.event = PM_EVENT_SUSPEND; error = -dev->driver->suspend(dev, state); if (error) goto out; } error = bus_generic_suspend(kdev); out: DRM_DEBUG_KMS("Finished suspend: %d\n", error); return error; } int drm_generic_resume(device_t kdev) { struct drm_device *dev; int error; DRM_DEBUG_KMS("Starting resume\n"); dev = device_get_softc(kdev); if (dev->driver->resume) { error = -dev->driver->resume(dev); if (error) goto out; } error = bus_generic_resume(kdev); out: DRM_DEBUG_KMS("Finished resume: %d\n", error); return error; } int drm_generic_detach(device_t kdev) { struct drm_device *dev; int i; dev = device_get_softc(kdev); drm_put_dev(dev); /* Clean up PCI resources allocated by drm_bufs.c. We're not really * worried about resource consumption while the DRM is inactive (between * lastclose and firstopen or unload) because these aren't actually * taking up KVA, just keeping the PCI resource allocated. */ for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) { if (dev->pcir[i] == NULL) continue; bus_release_resource(dev->dev, SYS_RES_MEMORY, dev->pcirid[i], dev->pcir[i]); dev->pcir[i] = NULL; } if (pci_disable_busmaster(dev->dev)) DRM_ERROR("Request to disable bus-master failed.\n"); return (0); } int drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top) { struct sysctl_oid *oid; snprintf(dev->busid_str, sizeof(dev->busid_str), "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid", CTLFLAG_RD, dev->busid_str, 0, NULL); if (oid == NULL) return (-ENOMEM); dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0; oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL); if (oid == NULL) return (-ENOMEM); return (0); } static int drm_device_find_capability(struct drm_device *dev, int cap) { return (pci_find_cap(dev->dev, cap, NULL) == 0); } int drm_pci_device_is_agp(struct drm_device *dev) { if (dev->driver->device_is_agp != NULL) { int ret; /* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely * AGP, 2 = fall back to PCI capability */ ret = (*dev->driver->device_is_agp)(dev); if (ret != DRM_MIGHT_BE_AGP) return ret; } return (drm_device_find_capability(dev, PCIY_AGP)); } int drm_pci_device_is_pcie(struct drm_device *dev) { return (drm_device_find_capability(dev, PCIY_EXPRESS)); } static bool dmi_found(const struct dmi_system_id *dsi) { char *hw_vendor, *hw_prod; int i, slot; bool res; hw_vendor = kern_getenv("smbios.planar.maker"); hw_prod = kern_getenv("smbios.planar.product"); res = true; for (i = 0; i < nitems(dsi->matches); i++) { slot = dsi->matches[i].slot; switch (slot) { case DMI_NONE: break; case DMI_SYS_VENDOR: case DMI_BOARD_VENDOR: if (hw_vendor != NULL && !strcmp(hw_vendor, dsi->matches[i].substr)) { break; } else { res = false; goto out; } case DMI_PRODUCT_NAME: case DMI_BOARD_NAME: if (hw_prod != NULL && 
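/*
 * Editorial sketch of the table this matcher consumes (hypothetical
 * entries, not from this commit).  On FreeBSD the vendor slots compare
 * against the smbios.planar.maker kenv and the product/board slots
 * against smbios.planar.product; an entry matches only if every
 * populated slot matches, and dmi_check_system() scans until it sees
 * an entry whose matches[0].slot is 0:
 *
 *	static int
 *	foo_quirk(const struct dmi_system_id *id)
 *	{
 *		printf("drm: applying foo quirk\n");
 *		return (1);	// non-zero stops the scan
 *	}
 *
 *	static const struct dmi_system_id foo_quirk_table[] = {
 *		{
 *			.callback = foo_quirk,
 *			.matches = {
 *				{ .slot = DMI_BOARD_VENDOR,
 *				  .substr = "FooCorp" },
 *				{ .slot = DMI_BOARD_NAME,
 *				  .substr = "FOO-1" },
 *			},
 *		},
 *		{ }	// terminator
 *	};
 */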
!strcmp(hw_prod, dsi->matches[i].substr)) { break; } else { res = false; goto out; } default: res = false; goto out; } } out: freeenv(hw_vendor); freeenv(hw_prod); return (res); } bool dmi_check_system(const struct dmi_system_id *sysid) { const struct dmi_system_id *dsi; bool res; for (res = false, dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) { if (dmi_found(dsi)) { res = true; if (dsi->callback != NULL && dsi->callback(dsi)) break; } } return (res); } #if __OS_HAS_MTRR int drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags) { int act; struct mem_range_desc mrdesc; mrdesc.mr_base = offset; mrdesc.mr_len = size; mrdesc.mr_flags = flags; act = MEMRANGE_SET_UPDATE; strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner)); return (-mem_range_attr_set(&mrdesc, &act)); } int drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size, unsigned int flags) { int act; struct mem_range_desc mrdesc; mrdesc.mr_base = offset; mrdesc.mr_len = size; mrdesc.mr_flags = flags; act = MEMRANGE_SET_REMOVE; strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner)); return (-mem_range_attr_set(&mrdesc, &act)); } #endif void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages) { #if defined(__i386__) || defined(__amd64__) pmap_invalidate_cache_pages(pages, num_pages); #else DRM_ERROR("drm_clflush_pages not implemented on this architecture"); #endif } void drm_clflush_virt_range(char *addr, unsigned long length) { #if defined(__i386__) || defined(__amd64__) pmap_invalidate_cache_range((vm_offset_t)addr, (vm_offset_t)addr + length, TRUE); #else DRM_ERROR("drm_clflush_virt_range not implemented on this architecture"); #endif } void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii __unused) { int i, j, c; i = j = 0; while (i < len && j <= linebuflen) { c = ((const char *)buf)[i]; if (i != 0) { if (i % rowsize == 0) { /* Newline required. */ sprintf(linebuf + j, "\n"); ++j; } else if (i % groupsize == 0) { /* Space required. 
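 *
 * Editorial usage sketch for this helper (hypothetical buffer, not
 * part of this commit): with rowsize 16 and groupsize 4, eight input
 * bytes format as "01020304 05060708\n".  Note the byte is fetched
 * through plain char, so where char is signed, values >= 0x80 print
 * sign-extended by the %02X conversion:
 *
 *	static const uint8_t bytes[8] =
 *	    {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
 *	char line[64];
 *
 *	hex_dump_to_buffer(bytes, sizeof(bytes), 16, 4, line,
 *	    sizeof(line), false);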
*/ sprintf(linebuf + j, " "); ++j; } } if (j > linebuflen - 4) break; sprintf(linebuf + j, "%02X", c); j += 2; ++i; } if (j <= linebuflen) sprintf(linebuf + j, "\n"); } #if DRM_LINUX #include MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1); #define LINUX_IOCTL_DRM_MIN 0x6400 #define LINUX_IOCTL_DRM_MAX 0x64ff static linux_ioctl_function_t drm_linux_ioctl; static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl, LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX}; /* The bits for in/out are switched on Linux */ #define LINUX_IOC_IN IOC_OUT #define LINUX_IOC_OUT IOC_IN static int drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args) { int error; int cmd = args->cmd; args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT); if (cmd & LINUX_IOC_IN) args->cmd |= IOC_IN; if (cmd & LINUX_IOC_OUT) args->cmd |= IOC_OUT; error = ioctl(p, (struct ioctl_args *)args); return error; } #endif /* DRM_LINUX */ static int drm_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: TUNABLE_INT_FETCH("drm.debug", &drm_debug); TUNABLE_INT_FETCH("drm.notyet", &drm_notyet); break; } return (0); } static moduledata_t drm_mod = { "drmn", drm_modevent, 0 }; DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(drmn, 1); MODULE_DEPEND(drmn, agp, 1, 1, 1); MODULE_DEPEND(drmn, pci, 1, 1, 1); MODULE_DEPEND(drmn, mem, 1, 1, 1); MODULE_DEPEND(drmn, iicbus, 1, 1, 1); Index: head/sys/dev/drm2/drm_os_freebsd.h =================================================================== --- head/sys/dev/drm2/drm_os_freebsd.h (revision 338347) +++ head/sys/dev/drm2/drm_os_freebsd.h (revision 338348) @@ -1,704 +1,718 @@ /** * \file drm_os_freebsd.h * OS abstraction macros. */ #include __FBSDID("$FreeBSD$"); #ifndef _DRM_OS_FREEBSD_H_ #define _DRM_OS_FREEBSD_H_ #include #include #if _BYTE_ORDER == _BIG_ENDIAN #define __BIG_ENDIAN 4321 #else #define __LITTLE_ENDIAN 1234 #endif #ifdef __LP64__ #define BITS_PER_LONG 64 #else #define BITS_PER_LONG 32 #endif #ifndef __user #define __user #endif #ifndef __iomem #define __iomem #endif #ifndef __always_unused #define __always_unused #endif #ifndef __must_check #define __must_check #endif #ifndef __force #define __force #endif #ifndef uninitialized_var #define uninitialized_var(x) x #endif #define cpu_to_le16(x) htole16(x) #define le16_to_cpu(x) le16toh(x) #define cpu_to_le32(x) htole32(x) #define le32_to_cpu(x) le32toh(x) #define cpu_to_be16(x) htobe16(x) #define be16_to_cpu(x) be16toh(x) #define cpu_to_be32(x) htobe32(x) #define be32_to_cpu(x) be32toh(x) #define be32_to_cpup(x) be32toh(*x) typedef vm_paddr_t dma_addr_t; typedef vm_paddr_t resource_size_t; #define wait_queue_head_t atomic_t typedef uint64_t u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; typedef int64_t s64; typedef int32_t s32; typedef int16_t s16; typedef int8_t s8; typedef uint16_t __le16; typedef uint32_t __le32; typedef uint64_t __le64; typedef uint16_t __be16; typedef uint32_t __be32; typedef uint64_t __be64; #define DRM_IRQ_ARGS void *arg typedef void irqreturn_t; #define IRQ_HANDLED /* nothing */ #define IRQ_NONE /* nothing */ #define __init #define __exit #define BUILD_BUG_ON(x) CTASSERT(!(x)) #define BUILD_BUG_ON_NOT_POWER_OF_2(x) #ifndef WARN #define WARN(condition, format, ...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ DRM_ERROR(format, ##__VA_ARGS__); \ unlikely(__ret_warn_on); \ }) #endif #define WARN_ONCE(condition, format, ...) 
\ WARN(condition, format, ##__VA_ARGS__) #define WARN_ON(cond) WARN(cond, "WARN ON: " #cond) #define WARN_ON_SMP(cond) WARN_ON(cond) #define BUG() panic("BUG") #define BUG_ON(cond) KASSERT(!(cond), ("BUG ON: " #cond " -> 0x%jx", (uintmax_t)(cond))) #define unlikely(x) __builtin_expect(!!(x), 0) #define likely(x) __builtin_expect(!!(x), 1) #define container_of(ptr, type, member) ({ \ __typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) #define KHZ2PICOS(a) (1000000000UL/(a)) #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) #define HZ hz #define DRM_HZ hz #define DRM_CURRENTPID curthread->td_proc->p_pid #define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0) #define udelay(usecs) DELAY(usecs) #define mdelay(msecs) do { int loops = (msecs); \ while (loops--) DELAY(1000); \ } while (0) #define DRM_UDELAY(udelay) DELAY(udelay) #define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000) #define DRM_MSLEEP(msecs) drm_msleep((msecs), "drm_msleep") #define get_seconds() time_second #define ioread8(addr) *(volatile uint8_t *)((char *)addr) #define ioread16(addr) *(volatile uint16_t *)((char *)addr) #define ioread32(addr) *(volatile uint32_t *)((char *)addr) #define iowrite8(data, addr) *(volatile uint8_t *)((char *)addr) = data; #define iowrite16(data, addr) *(volatile uint16_t *)((char *)addr) = data; #define iowrite32(data, addr) *(volatile uint32_t *)((char *)addr) = data; #define DRM_READ8(map, offset) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) #define DRM_READ16(map, offset) \ le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_READ32(map, offset) \ le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_READ64(map, offset) \ le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset))) #define DRM_WRITE8(map, offset, val) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = val #define DRM_WRITE16(map, offset, val) \ *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole16(val) #define DRM_WRITE32(map, offset, val) \ *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole32(val) #define DRM_WRITE64(map, offset, val) \ *(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ (vm_offset_t)(offset)) = htole64(val) +#ifdef __LP64__ +#define DRM_PORT "graphics/drm-stable-kmod" +#else +#define DRM_PORT "graphics/drm-legacy-kmod" +#endif + +#define DRM_OBSOLETE(dev) \ + do { \ + device_printf(dev, "=======================================================\n"); \ + device_printf(dev, "This code is obsolete abandonware. Install the " DRM_PORT " pkg\n"); \ + device_printf(dev, "=======================================================\n"); \ + gone_in_dev(dev, 13, "drm2 drivers"); \ + } while (0) + /* DRM_READMEMORYBARRIER() prevents reordering of reads. * DRM_WRITEMEMORYBARRIER() prevents reordering of writes. * DRM_MEMORYBARRIER() prevents reordering of reads and writes. 
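 *
 * Editorial sketch tying the barriers to the map accessors above
 * (hypothetical MMIO offset, not from this commit).  DRM_READ32() and
 * DRM_WRITE32() treat the mapping as little-endian device memory, so
 * their le32toh()/htole32() swaps are no-ops on x86 but matter on
 * big-endian hosts; a write barrier orders the payload before the
 * doorbell:
 *
 *	#define FOO_RING_TAIL	0x2030	// made-up register offset
 *
 *	static void
 *	foo_advance_ring(drm_local_map_t *mmio, uint32_t tail)
 *	{
 *		DRM_WRITEMEMORYBARRIER();	// payload before doorbell
 *		DRM_WRITE32(mmio, FOO_RING_TAIL, tail);
 *	}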
*/ #define DRM_READMEMORYBARRIER() rmb() #define DRM_WRITEMEMORYBARRIER() wmb() #define DRM_MEMORYBARRIER() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_mb__before_atomic_inc() mb() #define smp_mb__after_atomic_inc() mb() #define barrier() __compiler_membar() #define do_div(a, b) ((a) /= (b)) #define div64_u64(a, b) ((a) / (b)) #define lower_32_bits(n) ((u32)(n)) #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) #define __set_bit(n, s) set_bit((n), (s)) #define __clear_bit(n, s) clear_bit((n), (s)) #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1 : __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? __max1 : __max2; }) #define memset_io(a, b, c) memset((a), (b), (c)) #define memcpy_fromio(a, b, c) memcpy((a), (b), (c)) #define memcpy_toio(a, b, c) memcpy((a), (b), (c)) #define VERIFY_READ VM_PROT_READ #define VERIFY_WRITE VM_PROT_WRITE #define access_ok(prot, p, l) useracc((p), (l), (prot)) /* XXXKIB what is the right code for the FreeBSD ? */ /* kib@ used ENXIO here -- dumbbell@ */ #define EREMOTEIO EIO #define ERESTARTSYS 512 /* Same value as Linux. */ #define KTR_DRM KTR_DEV #define KTR_DRM_REG KTR_SPARE3 #define DRM_AGP_KERN struct agp_info #define DRM_AGP_MEM void #define PCI_VENDOR_ID_APPLE 0x106b #define PCI_VENDOR_ID_ASUSTEK 0x1043 #define PCI_VENDOR_ID_ATI 0x1002 #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_VENDOR_ID_HP 0x103c #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_VENDOR_ID_SONY 0x104d #define PCI_VENDOR_ID_VIA 0x1106 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define DIV_ROUND_CLOSEST(n,d) (((n) + (d) / 2) / (d)) #define div_u64(n, d) ((n) / (d)) #define hweight32(i) bitcount32(i) static inline unsigned long roundup_pow_of_two(unsigned long x) { return (1UL << flsl(x - 1)); } /** * ror32 - rotate a 32-bit value right * @word: value to rotate * @shift: bits to roll * * Source: include/linux/bitops.h */ static inline uint32_t ror32(uint32_t word, unsigned int shift) { return (word >> shift) | (word << (32 - shift)); } #define IS_ALIGNED(x, y) (((x) & ((y) - 1)) == 0) #define round_down(x, y) rounddown2((x), (y)) #define round_up(x, y) roundup2((x), (y)) #define get_unaligned(ptr) \ ({ __typeof__(*(ptr)) __tmp; \ memcpy(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) #if _BYTE_ORDER == _LITTLE_ENDIAN /* Taken from linux/include/linux/unaligned/le_struct.h. */ struct __una_u32 { u32 x; } __packed; static inline u32 __get_unaligned_cpu32(const void *p) { const struct __una_u32 *ptr = (const struct __una_u32 *)p; return (ptr->x); } static inline u32 get_unaligned_le32(const void *p) { return (__get_unaligned_cpu32((const u8 *)p)); } #else /* Taken from linux/include/linux/unaligned/le_byteshift.h. */ static inline u32 __get_unaligned_le32(const u8 *p) { return (p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24); } static inline u32 get_unaligned_le32(const void *p) { return (__get_unaligned_le32((const u8 *)p)); } #endif static inline unsigned long ilog2(unsigned long x) { return (flsl(x) - 1); } static inline int64_t abs64(int64_t x) { return (x < 0 ? 
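/*
 * Editorial note on the arithmetic shims above: unlike its Linux
 * namesake, this do_div() only performs the division and yields the
 * quotient; Linux's do_div() also hands back the remainder, so shared
 * code consuming the remainder needs care.  upper_32_bits() shifts
 * twice by 16 so the expression stays defined even for 32-bit n.  A
 * few worked values (editorial examples):
 *
 *	upper_32_bits(0x123456789abcdef0ULL) == 0x12345678
 *	lower_32_bits(0x123456789abcdef0ULL) == 0x9abcdef0
 *	roundup_pow_of_two(1000) == 1024
 *	ror32(0x80000001, 1) == 0xc0000000
 *	ilog2(4096) == 12
 */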
-x : x); } int64_t timeval_to_ns(const struct timeval *tv); struct timeval ns_to_timeval(const int64_t nsec); #define PAGE_ALIGN(addr) round_page(addr) #define page_to_phys(x) VM_PAGE_TO_PHYS(x) #define offset_in_page(x) ((x) & PAGE_MASK) #define drm_get_device_from_kdev(_kdev) (((struct drm_minor *)(_kdev)->si_drv1)->dev) #define DRM_IOC_VOID IOC_VOID #define DRM_IOC_READ IOC_OUT #define DRM_IOC_WRITE IOC_IN #define DRM_IOC_READWRITE IOC_INOUT #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) static inline long __copy_to_user(void __user *to, const void *from, unsigned long n) { return (copyout(from, to, n) != 0 ? n : 0); } #define copy_to_user(to, from, n) __copy_to_user((to), (from), (n)) static inline int __put_user(size_t size, void *ptr, void *x) { size = copy_to_user(ptr, x, size); return (size ? -EFAULT : size); } #define put_user(x, ptr) __put_user(sizeof(*ptr), (ptr), &(x)) static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { return ((copyin(__DECONST(void *, from), to, n) != 0 ? n : 0)); } #define copy_from_user(to, from, n) __copy_from_user((to), (from), (n)) static inline int __get_user(size_t size, const void *ptr, void *x) { size = copy_from_user(x, ptr, size); return (size ? -EFAULT : size); } #define get_user(x, ptr) __get_user(sizeof(*ptr), (ptr), &(x)) static inline int __copy_to_user_inatomic(void __user *to, const void *from, unsigned n) { return (copyout_nofault(from, to, n) != 0 ? n : 0); } #define __copy_to_user_inatomic_nocache(to, from, n) \ __copy_to_user_inatomic((to), (from), (n)) static inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { /* * XXXKIB. Equivalent Linux function is implemented using * MOVNTI for aligned moves. For unaligned head and tail, * normal move is performed. As such, it is not incorrect, if * only somewhat slower, to use normal copyin. All uses * except shmem_pwrite_fast() have the destination mapped WC. */ return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0)); } #define __copy_from_user_inatomic_nocache(to, from, n) \ __copy_from_user_inatomic((to), (from), (n)) static inline int fault_in_multipages_readable(const char __user *uaddr, int size) { char c; int ret = 0; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return ret; while (uaddr <= end) { ret = -copyin(uaddr, &c, 1); if (ret != 0) return -EFAULT; uaddr += PAGE_SIZE; } /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & ~PAGE_MASK) == ((unsigned long)end & ~PAGE_MASK)) { ret = -copyin(end, &c, 1); } return ret; } static inline int fault_in_multipages_writeable(char __user *uaddr, int size) { int ret = 0; char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return ret; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ while (uaddr <= end) { ret = subyte(uaddr, 0); if (ret != 0) return -EFAULT; uaddr += PAGE_SIZE; } /* Check whether the range spilled into the next page. 
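 *
 * Editorial sketch of the copy shims defined above (hypothetical
 * handler, not from this commit).  As in Linux, copy_{to,from}_user()
 * return the number of bytes left uncopied (0 on success) while
 * put_user()/get_user() return 0 or -EFAULT, sizing the access from
 * the pointer's type:
 *
 *	static int
 *	foo_put_value(uint64_t uptr)
 *	{
 *		int value = 42;
 *
 *		if (put_user(value, (int *)to_user_ptr(uptr)))
 *			return (-EFAULT);
 *		return (0);
 *	}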
*/ if (((unsigned long)uaddr & ~PAGE_MASK) == ((unsigned long)end & ~PAGE_MASK)) ret = subyte(end, 0); return ret; } enum __drm_capabilities { CAP_SYS_ADMIN }; static inline bool capable(enum __drm_capabilities cap) { switch (cap) { case CAP_SYS_ADMIN: return DRM_SUSER(curthread); default: panic("%s: unhandled capability: %0x", __func__, cap); return (false); } } #define to_user_ptr(x) ((void *)(uintptr_t)(x)) #define sigemptyset(set) SIGEMPTYSET(set) #define sigaddset(set, sig) SIGADDSET(set, sig) #define DRM_LOCK(dev) sx_xlock(&(dev)->dev_struct_lock) #define DRM_UNLOCK(dev) sx_xunlock(&(dev)->dev_struct_lock) extern unsigned long drm_linux_timer_hz_mask; #define jiffies ticks #define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz) #define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000) #define timespec_to_jiffies(x) (((x)->tv_sec * 1000000 + (x)->tv_nsec) * hz / 1000000) #define time_after(a,b) ((long)(b) - (long)(a) < 0) #define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0) #define round_jiffies(j) ((unsigned long)(((j) + drm_linux_timer_hz_mask) & ~drm_linux_timer_hz_mask)) #define round_jiffies_up(j) round_jiffies(j) /* TODO */ #define round_jiffies_up_relative(j) round_jiffies_up(j) /* TODO */ #define getrawmonotonic(ts) getnanouptime(ts) #define wake_up(queue) wakeup_one((void *)queue) #define wake_up_interruptible(queue) wakeup_one((void *)queue) #define wake_up_all(queue) wakeup((void *)queue) #define wake_up_interruptible_all(queue) wakeup((void *)queue) struct completion { unsigned int done; struct mtx lock; }; #define INIT_COMPLETION(c) ((c).done = 0); static inline void init_completion(struct completion *c) { mtx_init(&c->lock, "drmcompl", NULL, MTX_DEF); c->done = 0; } static inline void free_completion(struct completion *c) { mtx_destroy(&c->lock); } static inline void complete_all(struct completion *c) { mtx_lock(&c->lock); c->done++; mtx_unlock(&c->lock); wakeup(c); } static inline long wait_for_completion_interruptible_timeout(struct completion *c, unsigned long timeout) { unsigned long start_jiffies, elapsed_jiffies; bool timeout_expired = false, awakened = false; long ret = timeout; start_jiffies = ticks; mtx_lock(&c->lock); while (c->done == 0 && !timeout_expired) { ret = -msleep(c, &c->lock, PCATCH, "drmwco", timeout); switch(ret) { case -EWOULDBLOCK: timeout_expired = true; ret = 0; break; case -EINTR: case -ERESTART: ret = -ERESTARTSYS; break; case 0: awakened = true; break; } } mtx_unlock(&c->lock); if (awakened) { elapsed_jiffies = ticks - start_jiffies; ret = timeout > elapsed_jiffies ? 
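/*
 * Editorial usage sketch for the completion shim above (hypothetical
 * interrupt/wait pair, not from this commit).  It targets the Linux
 * contract: 0 on timeout, negative on a signal, otherwise the
 * remaining timeout (clamped to at least 1 tick here):
 *
 *	static struct completion foo_done;	// init_completion() at attach
 *
 *	static void
 *	foo_irq(void *arg)
 *	{
 *		complete_all(&foo_done);	// wakes every waiter
 *	}
 *
 *	static int
 *	foo_wait_idle(void)
 *	{
 *		long ret;
 *
 *		INIT_COMPLETION(foo_done);	// re-arm before starting hw
 *		// ... kick the hardware ...
 *		ret = wait_for_completion_interruptible_timeout(&foo_done,
 *		    msecs_to_jiffies(1000));
 *		if (ret == 0)
 *			return (-ETIMEDOUT);
 *		return (ret < 0 ? ret : 0);
 *	}
 */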
timeout - elapsed_jiffies : 1; } return (ret); } MALLOC_DECLARE(DRM_MEM_DMA); MALLOC_DECLARE(DRM_MEM_SAREA); MALLOC_DECLARE(DRM_MEM_DRIVER); MALLOC_DECLARE(DRM_MEM_MAGIC); MALLOC_DECLARE(DRM_MEM_MINOR); MALLOC_DECLARE(DRM_MEM_IOCTLS); MALLOC_DECLARE(DRM_MEM_MAPS); MALLOC_DECLARE(DRM_MEM_BUFS); MALLOC_DECLARE(DRM_MEM_SEGS); MALLOC_DECLARE(DRM_MEM_PAGES); MALLOC_DECLARE(DRM_MEM_FILES); MALLOC_DECLARE(DRM_MEM_QUEUES); MALLOC_DECLARE(DRM_MEM_CMDS); MALLOC_DECLARE(DRM_MEM_MAPPINGS); MALLOC_DECLARE(DRM_MEM_BUFLISTS); MALLOC_DECLARE(DRM_MEM_AGPLISTS); MALLOC_DECLARE(DRM_MEM_CTXBITMAP); MALLOC_DECLARE(DRM_MEM_SGLISTS); MALLOC_DECLARE(DRM_MEM_MM); MALLOC_DECLARE(DRM_MEM_HASHTAB); MALLOC_DECLARE(DRM_MEM_KMS); MALLOC_DECLARE(DRM_MEM_VBLANK); #define simple_strtol(a, b, c) strtol((a), (b), (c)) typedef struct drm_pci_id_list { int vendor; int device; long driver_private; char *name; } drm_pci_id_list_t; #ifdef __i386__ #define CONFIG_X86 1 #endif #ifdef __amd64__ #define CONFIG_X86 1 #define CONFIG_X86_64 1 #endif #ifdef __ia64__ #define CONFIG_IA64 1 #endif #if defined(__i386__) || defined(__amd64__) #define CONFIG_ACPI #define CONFIG_DRM_I915_KMS #undef CONFIG_INTEL_IOMMU #endif #ifdef COMPAT_FREEBSD32 #define CONFIG_COMPAT #endif #ifndef __arm__ #define CONFIG_AGP 1 #define CONFIG_MTRR 1 #endif #define CONFIG_FB 1 extern const char *fb_mode_option; #undef CONFIG_DEBUG_FS #undef CONFIG_VGA_CONSOLE #define EXPORT_SYMBOL(x) #define EXPORT_SYMBOL_GPL(x) #define MODULE_AUTHOR(author) #define MODULE_DESCRIPTION(desc) #define MODULE_LICENSE(license) #define MODULE_PARM_DESC(name, desc) #define MODULE_DEVICE_TABLE(name, list) #define module_param_named(name, var, type, perm) #define printk printf #define pr_err DRM_ERROR #define pr_warn DRM_WARNING #define pr_warn_once DRM_WARNING #define KERN_DEBUG "" /* I2C compatibility. */ #define I2C_M_RD IIC_M_RD #define I2C_M_WR IIC_M_WR #define I2C_M_NOSTART IIC_M_NOSTART struct fb_info * framebuffer_alloc(void); void framebuffer_release(struct fb_info *info); #define console_lock() #define console_unlock() #define console_trylock() true #define PM_EVENT_SUSPEND 0x0002 #define PM_EVENT_QUIESCE 0x0008 #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE typedef struct pm_message { int event; } pm_message_t; static inline int pci_read_config_byte(device_t kdev, int where, u8 *val) { *val = (u8)pci_read_config(kdev, where, 1); return (0); } static inline int pci_write_config_byte(device_t kdev, int where, u8 val) { pci_write_config(kdev, where, val, 1); return (0); } static inline int pci_read_config_word(device_t kdev, int where, uint16_t *val) { *val = (uint16_t)pci_read_config(kdev, where, 2); return (0); } static inline int pci_write_config_word(device_t kdev, int where, uint16_t val) { pci_write_config(kdev, where, val, 2); return (0); } static inline int pci_read_config_dword(device_t kdev, int where, uint32_t *val) { *val = (uint32_t)pci_read_config(kdev, where, 4); return (0); } static inline int pci_write_config_dword(device_t kdev, int where, uint32_t val) { pci_write_config(kdev, where, val, 4); return (0); } static inline void on_each_cpu(void callback(void *data), void *data, int wait) { smp_rendezvous(NULL, callback, NULL, data); } void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii); #define KIB_NOTYET() \ do { \ if (drm_debug && drm_notyet) \ printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \ } while (0) #endif /* _DRM_OS_FREEBSD_H_ */
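/*
 * Editorial usage sketch for the Linux-style PCI config accessors
 * defined above (hypothetical config-space offset, not from this
 * commit).  The wrappers return a constant 0 because pci_read_config(9)
 * and pci_write_config(9) cannot fail.  Note also that KIB_NOTYET()
 * output is gated by the drm.debug and drm.notyet tunables fetched in
 * drm_modevent():
 *
 *	static uint16_t
 *	foo_read_gmch(struct drm_device *dev)
 *	{
 *		uint16_t ctrl;
 *
 *		pci_read_config_word(dev->dev, 0x52, &ctrl);
 *		return (ctrl);
 *	}
 */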